diff --git a/core/src/com/cloud/storage/template/IsoProcessor.java b/core/src/com/cloud/storage/template/IsoProcessor.java index 4d0a331f547..7828f782567 100644 --- a/core/src/com/cloud/storage/template/IsoProcessor.java +++ b/core/src/com/cloud/storage/template/IsoProcessor.java @@ -25,7 +25,6 @@ import java.util.Map; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; @@ -33,21 +32,20 @@ import com.cloud.utils.component.AdapterBase; @Local(value = Processor.class) public class IsoProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(IsoProcessor.class); StorageLayer _storage; @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName) { if (format != null) { - s_logger.debug("We don't handle conversion from " + format + " to ISO."); + logger.debug("We don't handle conversion from " + format + " to ISO."); return null; } String isoPath = templatePath + File.separator + templateName + "." 
+ ImageFormat.ISO.getFileExtension(); if (!_storage.exists(isoPath)) { - s_logger.debug("Unable to find the iso file: " + isoPath); + logger.debug("Unable to find the iso file: " + isoPath); return null; } diff --git a/core/src/com/cloud/storage/template/OVAProcessor.java b/core/src/com/cloud/storage/template/OVAProcessor.java index 3d7f7a23bd7..9c61487c2af 100644 --- a/core/src/com/cloud/storage/template/OVAProcessor.java +++ b/core/src/com/cloud/storage/template/OVAProcessor.java @@ -26,7 +26,6 @@ import javax.ejb.Local; import javax.naming.ConfigurationException; import javax.xml.parsers.DocumentBuilderFactory; -import org.apache.log4j.Logger; import org.w3c.dom.Document; import org.w3c.dom.Element; @@ -38,39 +37,38 @@ import com.cloud.utils.script.Script; @Local(value = Processor.class) public class OVAProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(OVAProcessor.class); StorageLayer _storage; @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException { if (format != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("We currently don't handle conversion from " + format + " to OVA."); + if (logger.isInfoEnabled()) { + logger.info("We currently don't handle conversion from " + format + " to OVA."); } return null; } - s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); + logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); String templateFilePath = templatePath + File.separator + templateName + "." 
+ ImageFormat.OVA.getFileExtension(); if (!_storage.exists(templateFilePath)) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to find the vmware template file: " + templateFilePath); + if (logger.isInfoEnabled()) { + logger.info("Unable to find the vmware template file: " + templateFilePath); } return null; } - s_logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName); + logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName); String templateFileFullPath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension(); File templateFile = new File(templateFileFullPath); - Script command = new Script("tar", 0, s_logger); + Script command = new Script("tar", 0, logger); command.add("--no-same-owner"); command.add("-xf", templateFileFullPath); command.setWorkDir(templateFile.getParent()); String result = command.execute(); if (result != null) { - s_logger.info("failed to untar OVA package due to " + result + ". templatePath: " + templatePath + ", templateName: " + templateName); + logger.info("failed to untar OVA package due to " + result + ". 
templatePath: " + templatePath + ", templateName: " + templateName); return null; } @@ -91,7 +89,7 @@ public class OVAProcessor extends AdapterBase implements Processor { long size = getTemplateVirtualSize(file.getParent(), file.getName()); return size; } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failed to get virtual template size for ova: " + e.getLocalizedMessage()); } return file.length(); @@ -105,7 +103,7 @@ public class OVAProcessor extends AdapterBase implements Processor { String ovfFileName = getOVFFilePath(templateFileFullPath); if (ovfFileName == null) { String msg = "Unable to locate OVF file in template package directory: " + templatePath; - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } try { @@ -130,7 +128,7 @@ public class OVAProcessor extends AdapterBase implements Processor { return virtualSize; } catch (Exception e) { String msg = "Unable to parse OVF XML document to get the virtual disk size due to" + e; - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } } diff --git a/core/src/com/cloud/storage/template/QCOW2Processor.java b/core/src/com/cloud/storage/template/QCOW2Processor.java index 2c66415bf96..59560050f0e 100644 --- a/core/src/com/cloud/storage/template/QCOW2Processor.java +++ b/core/src/com/cloud/storage/template/QCOW2Processor.java @@ -27,7 +27,6 @@ import java.util.Map; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; @@ -36,7 +35,6 @@ import com.cloud.utils.component.AdapterBase; @Local(value = Processor.class) public class QCOW2Processor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(QCOW2Processor.class); private static final int VIRTUALSIZE_HEADER_LOCATION = 24; private StorageLayer _storage; @@ -44,14 +42,14 @@ public class QCOW2Processor 
extends AdapterBase implements Processor { @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName) { if (format != null) { - s_logger.debug("We currently don't handle conversion from " + format + " to QCOW2."); + logger.debug("We currently don't handle conversion from " + format + " to QCOW2."); return null; } String qcow2Path = templatePath + File.separator + templateName + "." + ImageFormat.QCOW2.getFileExtension(); if (!_storage.exists(qcow2Path)) { - s_logger.debug("Unable to find the qcow2 file: " + qcow2Path); + logger.debug("Unable to find the qcow2 file: " + qcow2Path); return null; } @@ -66,7 +64,7 @@ public class QCOW2Processor extends AdapterBase implements Processor { try { info.virtualSize = getVirtualSize(qcow2File); } catch (IOException e) { - s_logger.error("Unable to get virtual size from " + qcow2File.getName()); + logger.error("Unable to get virtual size from " + qcow2File.getName()); return null; } diff --git a/core/src/com/cloud/storage/template/RawImageProcessor.java b/core/src/com/cloud/storage/template/RawImageProcessor.java index 820ef1986f5..74b7428102b 100644 --- a/core/src/com/cloud/storage/template/RawImageProcessor.java +++ b/core/src/com/cloud/storage/template/RawImageProcessor.java @@ -25,7 +25,6 @@ import java.util.Map; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.exception.InternalErrorException; import com.cloud.storage.Storage.ImageFormat; @@ -34,7 +33,6 @@ import com.cloud.utils.component.AdapterBase; @Local(value = Processor.class) public class RawImageProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(RawImageProcessor.class); StorageLayer _storage; @Override @@ -50,13 +48,13 @@ public class RawImageProcessor extends AdapterBase implements Processor { @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws 
InternalErrorException { if (format != null) { - s_logger.debug("We currently don't handle conversion from " + format + " to raw image."); + logger.debug("We currently don't handle conversion from " + format + " to raw image."); return null; } String imgPath = templatePath + File.separator + templateName + "." + ImageFormat.RAW.getFileExtension(); if (!_storage.exists(imgPath)) { - s_logger.debug("Unable to find raw image:" + imgPath); + logger.debug("Unable to find raw image:" + imgPath); return null; } FormatInfo info = new FormatInfo(); @@ -64,7 +62,7 @@ public class RawImageProcessor extends AdapterBase implements Processor { info.filename = templateName + "." + ImageFormat.RAW.getFileExtension(); info.size = _storage.getSize(imgPath); info.virtualSize = info.size; - s_logger.debug("Process raw image " + info.filename + " successfully"); + logger.debug("Process raw image " + info.filename + " successfully"); return info; } diff --git a/core/src/com/cloud/storage/template/TARProcessor.java b/core/src/com/cloud/storage/template/TARProcessor.java index 96c866e4a03..e120f89cd5f 100644 --- a/core/src/com/cloud/storage/template/TARProcessor.java +++ b/core/src/com/cloud/storage/template/TARProcessor.java @@ -22,7 +22,6 @@ package com.cloud.storage.template; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; import com.cloud.utils.component.AdapterBase; -import org.apache.log4j.Logger; import javax.ejb.Local; import javax.naming.ConfigurationException; @@ -31,21 +30,20 @@ import java.util.Map; @Local(value = Processor.class) public class TARProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(TARProcessor.class); private StorageLayer _storage; @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName) { if (format != null) { - s_logger.debug("We currently don't handle conversion from " + format + " to TAR."); + logger.debug("We currently 
don't handle conversion from " + format + " to TAR."); return null; } String tarPath = templatePath + File.separator + templateName + "." + ImageFormat.TAR.getFileExtension(); if (!_storage.exists(tarPath)) { - s_logger.debug("Unable to find the tar file: " + tarPath); + logger.debug("Unable to find the tar file: " + tarPath); return null; } diff --git a/core/src/com/cloud/storage/template/VhdProcessor.java b/core/src/com/cloud/storage/template/VhdProcessor.java index 2974c75c190..adece96e0c7 100644 --- a/core/src/com/cloud/storage/template/VhdProcessor.java +++ b/core/src/com/cloud/storage/template/VhdProcessor.java @@ -27,7 +27,6 @@ import java.util.Map; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; @@ -43,7 +42,6 @@ import com.cloud.utils.component.AdapterBase; @Local(value = Processor.class) public class VhdProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(VhdProcessor.class); StorageLayer _storage; private int vhdFooterSize = 512; private int vhdFooterCreatorAppOffset = 28; @@ -54,13 +52,13 @@ public class VhdProcessor extends AdapterBase implements Processor { @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName) { if (format != null) { - s_logger.debug("We currently don't handle conversion from " + format + " to VHD."); + logger.debug("We currently don't handle conversion from " + format + " to VHD."); return null; } String vhdPath = templatePath + File.separator + templateName + "." 
+ ImageFormat.VHD.getFileExtension(); if (!_storage.exists(vhdPath)) { - s_logger.debug("Unable to find the vhd file: " + vhdPath); + logger.debug("Unable to find the vhd file: " + vhdPath); return null; } @@ -74,7 +72,7 @@ public class VhdProcessor extends AdapterBase implements Processor { try { info.virtualSize = getVirtualSize(vhdFile); } catch (IOException e) { - s_logger.error("Unable to get the virtual size for " + vhdPath); + logger.error("Unable to get the virtual size for " + vhdPath); return null; } diff --git a/core/src/com/cloud/storage/template/VmdkProcessor.java b/core/src/com/cloud/storage/template/VmdkProcessor.java index 3d399f5791b..ca03d4fc9e6 100644 --- a/core/src/com/cloud/storage/template/VmdkProcessor.java +++ b/core/src/com/cloud/storage/template/VmdkProcessor.java @@ -31,7 +31,6 @@ import java.util.regex.Pattern; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.exception.InternalErrorException; import com.cloud.storage.Storage.ImageFormat; @@ -40,24 +39,23 @@ import com.cloud.utils.component.AdapterBase; @Local(value = Processor.class) public class VmdkProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(VmdkProcessor.class); StorageLayer _storage; @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException { if (format != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("We currently don't handle conversion from " + format + " to VMDK."); + if (logger.isInfoEnabled()) { + logger.info("We currently don't handle conversion from " + format + " to VMDK."); } return null; } - s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); + logger.info("Template processing. 
templatePath: " + templatePath + ", templateName: " + templateName); String templateFilePath = templatePath + File.separator + templateName + "." + ImageFormat.VMDK.getFileExtension(); if (!_storage.exists(templateFilePath)) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to find the vmware template file: " + templateFilePath); + if (logger.isInfoEnabled()) { + logger.info("Unable to find the vmware template file: " + templateFilePath); } return null; } @@ -77,7 +75,7 @@ public class VmdkProcessor extends AdapterBase implements Processor { long size = getTemplateVirtualSize(file.getParent(), file.getName()); return size; } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failed to get template virtual size for vmdk: " + e.getLocalizedMessage()); } return file.length(); @@ -103,15 +101,15 @@ public class VmdkProcessor extends AdapterBase implements Processor { } } catch(FileNotFoundException ex) { String msg = "Unable to open file '" + templateFileFullPath + "' " + ex.toString(); - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } catch(IOException ex) { String msg = "Unable read open file '" + templateFileFullPath + "' " + ex.toString(); - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } - s_logger.debug("vmdk file had size="+virtualSize); + logger.debug("vmdk file had size="+virtualSize); return virtualSize; } diff --git a/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java index a38fd0881c2..de1d53c99e6 100644 --- a/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; import 
org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -116,8 +115,6 @@ import com.cloud.utils.time.InaccurateClock; **/ @Local(value = {AgentManager.class}) public class AgentManagerImpl extends ManagerBase implements AgentManager, HandlerFactory, Configurable { - protected static final Logger s_logger = Logger.getLogger(AgentManagerImpl.class); - protected static final Logger status_logger = Logger.getLogger(Status.class); /** * _agents is a ConcurrentHashMap, but it is used from within a synchronized block. @@ -200,12 +197,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean configure(final String name, final Map params) throws ConfigurationException { - s_logger.info("Ping Timeout is " + PingTimeout.value()); + logger.info("Ping Timeout is " + PingTimeout.value()); final int threads = DirectAgentLoadSize.value(); _nodeId = ManagementServerNode.getManagementServerId(); - s_logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId); + logger.info("Configuring AgentManagerImpl. 
management server node id(msid): " + _nodeId); final long lastPing = (System.currentTimeMillis() >> 10) - (long)(PingTimeout.value() * PingInterval.value()); _hostDao.markHostsAsDisconnected(_nodeId, lastPing); @@ -219,13 +216,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _connectExecutor.allowCoreThreadTimeOut(true); _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this); - s_logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers"); + logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers"); // executes all agent commands other than cron and ping _directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent")); // executes cron and ping agent commands _cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob")); - s_logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value()); + logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value()); _directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); @@ -260,8 +257,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _cmdMonitors.add(new Pair(_monitorId, listener)); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId); + if (logger.isDebugEnabled()) { + logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId); } return _monitorId; } @@ -282,7 +279,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public void 
unregisterForHostEvents(final int id) { - s_logger.debug("Deregistering " + id); + logger.debug("Deregistering " + id); _hostMonitors.remove(id); } @@ -297,15 +294,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } - s_logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId()); + logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId()); return new AgentControlAnswer(cmd); } public void handleCommands(final AgentAttache attache, final long sequence, final Command[] cmds) { for (final Pair listener : _cmdMonitors) { final boolean processed = listener.second().processCommands(attache.getId(), sequence, cmds); - if (s_logger.isTraceEnabled()) { - s_logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? "processed" : "not processed") + " by " + listener.getClass()); + if (logger.isTraceEnabled()) { + logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? 
"processed" : "not processed") + " by " + listener.getClass()); } } } @@ -365,7 +362,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } if (answers != null && answers[0] instanceof UnsupportedAnswer) { - s_logger.warn("Unsupported Command: " + answers[0].getDetails()); + logger.warn("Unsupported Command: " + answers[0].getDetails()); return answers[0]; } @@ -458,14 +455,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Long hostId = agent.getId(); final HostVO host = _hostDao.findById(hostId); if (host != null && host.getType() != null && !host.getType().isVirtual()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("checking if agent (" + hostId + ") is alive"); + if (logger.isDebugEnabled()) { + logger.debug("checking if agent (" + hostId + ") is alive"); } final Answer answer = easySend(hostId, new CheckHealthCommand()); if (answer != null && answer.getResult()) { final Status status = Status.Up; - if (s_logger.isDebugEnabled()) { - s_logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status); + if (logger.isDebugEnabled()) { + logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status); } return status; } @@ -480,7 +477,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } final AgentAttache agent = findAttache(hostId); if (agent == null) { - s_logger.debug("Unable to find agent for " + hostId); + logger.debug("Unable to find agent for " + hostId); throw new AgentUnavailableException("Unable to find agent ", hostId); } @@ -508,8 +505,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return; } final long hostId = attache.getId(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Remove Agent : " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Remove Agent : " + hostId); } AgentAttache removed = null; 
boolean conflict = false; @@ -522,15 +519,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } if (conflict) { - s_logger.debug("Agent for host " + hostId + " is created when it is being disconnected"); + logger.debug("Agent for host " + hostId + " is created when it is being disconnected"); } if (removed != null) { removed.disconnect(nextState); } for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); } monitor.second().processDisconnect(hostId, nextState); } @@ -540,8 +537,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final long hostId = attache.getId(); final HostVO host = _hostDao.findById(hostId); for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName()); } for (int i = 0; i < cmd.length; i++) { try { @@ -550,12 +547,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (e instanceof ConnectionException) { final ConnectionException ce = (ConnectionException)e; if (ce.isSetupError()) { - s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + + logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw ce; } else { - s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not 
to continue the connect process for " + hostId + + logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); return attache; @@ -564,7 +561,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } else { - s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + + logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); @@ -616,19 +613,19 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Constructor constructor = clazz.getConstructor(); resource = (ServerResource)constructor.newInstance(); } catch (final ClassNotFoundException e) { - s_logger.warn("Unable to find class " + host.getResource(), e); + logger.warn("Unable to find class " + host.getResource(), e); } catch (final InstantiationException e) { - s_logger.warn("Unablet to instantiate class " + host.getResource(), e); + logger.warn("Unablet to instantiate class " + host.getResource(), e); } catch (final IllegalAccessException e) { - s_logger.warn("Illegal access " + host.getResource(), e); + logger.warn("Illegal access " + host.getResource(), e); } catch (final SecurityException e) { - s_logger.warn("Security error on " + host.getResource(), e); + logger.warn("Security error on " + host.getResource(), e); } catch (final NoSuchMethodException e) { - 
s_logger.warn("NoSuchMethodException error on " + host.getResource(), e); + logger.warn("NoSuchMethodException error on " + host.getResource(), e); } catch (final IllegalArgumentException e) { - s_logger.warn("IllegalArgumentException error on " + host.getResource(), e); + logger.warn("IllegalArgumentException error on " + host.getResource(), e); } catch (final InvocationTargetException e) { - s_logger.warn("InvocationTargetException error on " + host.getResource(), e); + logger.warn("InvocationTargetException error on " + host.getResource(), e); } if (resource != null) { @@ -662,12 +659,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl try { resource.configure(host.getName(), params); } catch (final ConfigurationException e) { - s_logger.warn("Unable to configure resource due to " + e.getMessage()); + logger.warn("Unable to configure resource due to " + e.getMessage()); return null; } if (!resource.start()) { - s_logger.warn("Unable to start the resource"); + logger.warn("Unable to start the resource"); return null; } } @@ -685,14 +682,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl //load the respective discoverer final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); if (discoverer == null) { - s_logger.info("Could not to find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType()); + logger.info("Could not to find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType()); resource = loadResourcesWithoutHypervisor(host); } else { resource = discoverer.reloadResource(host); } if (resource == null) { - s_logger.warn("Unable to load the resource: " + host.getId()); + logger.warn("Unable to load the resource: " + host.getId()); return false; } @@ -718,7 +715,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } protected 
AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { - s_logger.debug("create DirectAgentAttache for " + host.getId()); + logger.debug("create DirectAgentAttache for " + host.getId()); final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates()); AgentAttache old = null; @@ -739,13 +736,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _connection.stop(); } - s_logger.info("Disconnecting agents: " + _agents.size()); + logger.info("Disconnecting agents: " + _agents.size()); synchronized (_agents) { for (final AgentAttache agent : _agents.values()) { final HostVO host = _hostDao.findById(agent.getId()); if (host == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cant not find host " + agent.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Cant not find host " + agent.getId()); } } else { if (!agent.forForward()) { @@ -763,17 +760,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected boolean handleDisconnectWithoutInvestigation(final AgentAttache attache, final Status.Event event, final boolean transitState, final boolean removeAgent) { final long hostId = attache.getId(); - s_logger.info("Host " + hostId + " is disconnecting with event " + event); + logger.info("Host " + hostId + " is disconnecting with event " + event); Status nextStatus = null; final HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.warn("Can't find host with " + hostId); + logger.warn("Can't find host with " + hostId); nextStatus = Status.Removed; } else { final Status currentStatus = host.getStatus(); if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host " + hostId + " is already " + currentStatus); + if (logger.isDebugEnabled()) 
{ + logger.debug("Host " + hostId + " is already " + currentStatus); } nextStatus = currentStatus; } else { @@ -781,18 +778,18 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl nextStatus = currentStatus.getNextStatus(event); } catch (final NoTransitionException e) { final String err = "Cannot find next status for " + event + " as current status is " + currentStatus + " for agent " + hostId; - s_logger.debug(err); + logger.debug(err); throw new CloudRuntimeException(err); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("The next status of agent " + hostId + "is " + nextStatus + ", current status is " + currentStatus); + if (logger.isDebugEnabled()) { + logger.debug("The next status of agent " + hostId + "is " + nextStatus + ", current status is " + currentStatus); } } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deregistering link for " + hostId + " with state " + nextStatus); + if (logger.isDebugEnabled()) { + logger.debug("Deregistering link for " + hostId + " with state " + nextStatus); } removeAgent(attache, nextStatus); @@ -817,48 +814,48 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl * Why this can happen? Ask God not me. I hate there was no piece of comment for code handling race condition. * God knew what race condition the code dealt with! 
*/ - s_logger.debug("Caught exception while getting agent's next status", ne); + logger.debug("Caught exception while getting agent's next status", ne); } if (nextStatus == Status.Alert) { /* OK, we are going to the bad status, let's see what happened */ - s_logger.info("Investigating why host " + hostId + " has disconnected with event " + event); + logger.info("Investigating why host " + hostId + " has disconnected with event " + event); Status determinedState = investigate(attache); // if state cannot be determined do nothing and bail out if (determinedState == null) { if (((System.currentTimeMillis() >> 10) - host.getLastPinged()) > AlertWait.value()) { - s_logger.warn("Agent " + hostId + " state cannot be determined for more than " + AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state"); + logger.warn("Agent " + hostId + " state cannot be determined for more than " + AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state"); determinedState = Status.Alert; } else { - s_logger.warn("Agent " + hostId + " state cannot be determined, do nothing"); + logger.warn("Agent " + hostId + " state cannot be determined, do nothing"); return false; } } final Status currentStatus = host.getStatus(); - s_logger.info("The agent " + hostId + " state determined is " + determinedState); + logger.info("The agent " + hostId + " state determined is " + determinedState); if (determinedState == Status.Down) { String message = "Host is down: " + host.getId() + "-" + host.getName() + ". 
Starting HA on the VMs"; - s_logger.error(message); + logger.error(message); if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host down, " + host.getId(), message); } event = Status.Event.HostDown; } else if (determinedState == Status.Up) { /* Got ping response from host, bring it back */ - s_logger.info("Agent is determined to be up and running"); + logger.info("Agent is determined to be up and running"); agentStatusTransitTo(host, Status.Event.Ping, _nodeId); return false; } else if (determinedState == Status.Disconnected) { - s_logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName()); + logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName()); if (currentStatus == Status.Disconnected) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - s_logger.warn("Host " + host.getId() + " has been disconnected past the wait time it should be disconnected."); + logger.warn("Host " + host.getId() + " has been disconnected past the wait time it should be disconnected."); event = Status.Event.WaitedTooLong; } else { - s_logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet."); + logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet."); return false; } } else if (currentStatus == Status.Up) { @@ -880,7 +877,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl "In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName()); } } else { - s_logger.debug("The next status of agent " + host.getId() + " is not Alert, no need to investigate what happened"); + logger.debug("The next status of agent " + 
host.getId() + " is not Alert, no need to investigate what happened"); } } handleDisconnectWithoutInvestigation(attache, event, true, true); @@ -911,7 +908,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl handleDisconnectWithoutInvestigation(_attache, _event, true, false); } } catch (final Exception e) { - s_logger.error("Exception caught while handling disconnect: ", e); + logger.error("Exception caught while handling disconnect: ", e); } } } @@ -921,34 +918,34 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl try { final Host h = _hostDao.findById(hostId); if (h == null || h.getRemoved() != null) { - s_logger.debug("Host with id " + hostId + " doesn't exist"); + logger.debug("Host with id " + hostId + " doesn't exist"); return null; } final Status status = h.getStatus(); if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) { - s_logger.debug("Can not send command " + cmd + " due to Host " + hostId + " is not up"); + logger.debug("Can not send command " + cmd + " due to Host " + hostId + " is not up"); return null; } final Answer answer = send(hostId, cmd); if (answer == null) { - s_logger.warn("send returns null answer"); + logger.warn("send returns null answer"); return null; } - if (s_logger.isDebugEnabled() && answer.getDetails() != null) { - s_logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails()); + if (logger.isDebugEnabled() && answer.getDetails() != null) { + logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails()); } return answer; } catch (final AgentUnavailableException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); return null; } catch (final OperationTimedoutException e) { - s_logger.warn("Operation timed out: " + e.getMessage()); + logger.warn("Operation timed out: " + e.getMessage()); return null; } catch (final Exception e) { - s_logger.warn("Exception while sending", e); + 
logger.warn("Exception while sending", e); return null; } } @@ -970,23 +967,23 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl host = _hostDao.findById(hostId); if (host == null || host.getRemoved() != null) { - s_logger.warn("Unable to find host " + hostId); + logger.warn("Unable to find host " + hostId); return false; } if (host.getStatus() == Status.Disconnected) { - s_logger.info("Host is already disconnected, no work to be done"); + logger.info("Host is already disconnected, no work to be done"); return true; } if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) { - s_logger.info("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus()); + logger.info("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus()); return false; } final AgentAttache attache = findAttache(hostId); if (attache == null) { - s_logger.info("Unable to disconnect host because it is not connected to this server: " + hostId); + logger.info("Unable to disconnect host because it is not connected to this server: " + hostId); return false; } @@ -996,8 +993,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received agent disconnect event for host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Received agent disconnect event for host " + hostId); } AgentAttache attache = null; attache = findAttache(hostId); @@ -1018,7 +1015,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException { - 
s_logger.debug("create ConnectedAgentAttache for " + host.getId()); + logger.debug("create ConnectedAgentAttache for " + host.getId()); final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); @@ -1044,7 +1041,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl attache = notifyMonitorsOfConnection(attache, startup, false); } } catch (final Exception e) { - s_logger.debug("Failed to handle host connection: " + e.toString()); + logger.debug("Failed to handle host connection: " + e.toString()); ready = new ReadyCommand(null); ready.setDetails(e.toString()); } finally { @@ -1061,7 +1058,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl easySend(attache.getId(), ready); } } catch (final Exception e) { - s_logger.debug("Failed to send ready command:" + e.toString()); + logger.debug("Failed to send ready command:" + e.toString()); } return attache; } @@ -1080,28 +1077,28 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override protected void runInContext() { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Simulating start for resource " + resource.getName() + " id " + id); + if (logger.isDebugEnabled()) { + logger.debug("Simulating start for resource " + resource.getName() + " id " + id); } if (tapLoadingAgents(id, TapAgentsAction.Add)) { try { final AgentAttache agentattache = findAttache(id); if (agentattache == null) { - s_logger.debug("Creating agent for host " + id); + logger.debug("Creating agent for host " + id); _resourceMgr.createHostAndAgent(id, resource, details, false, null, false); - s_logger.debug("Completed creating agent for host " + id); + logger.debug("Completed creating agent for host " + id); } else { - s_logger.debug("Agent already created in another thread for host " + id + ", ignore this"); + logger.debug("Agent already created in another 
thread for host " + id + ", ignore this"); } } finally { tapLoadingAgents(id, TapAgentsAction.Del); } } else { - s_logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this"); + logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this"); } } catch (final Exception e) { - s_logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e); + logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e); } } } @@ -1127,7 +1124,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final AgentAttache attache = handleConnectedAgent(_link, startups, _request); if (attache == null) { - s_logger.warn("Unable to create attache for agent: " + _request); + logger.warn("Unable to create attache for agent: " + _request); } } } @@ -1149,7 +1146,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl try { link.send(response.toBytes()); } catch (final ClosedChannelException e) { - s_logger.debug("Failed to send startupanswer: " + e.toString()); + logger.debug("Failed to send startupanswer: " + e.toString()); } _connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request)); } @@ -1167,7 +1164,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (attache == null) { if (!(cmd instanceof StartupCommand)) { - s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request); + logger.warn("Throwing away a request because it came through as the first command on a connect: " + request); } else { //submit the task for execution request.logD("Scheduling the first command "); @@ -1178,17 +1175,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final long hostId = attache.getId(); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if 
(cmd instanceof PingRoutingCommand) { logD = false; - s_logger.debug("Ping from " + hostId); - s_logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); + logger.debug("Ping from " + hostId); + logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); } else if (cmd instanceof PingCommand) { logD = false; - s_logger.debug("Ping from " + hostId); - s_logger.trace("SeqA " + attache.getId() + "-" + request.getSequence() + ": Processing " + request); + logger.debug("Ping from " + hostId); + logger.trace("SeqA " + attache.getId() + "-" + request.getSequence() + ": Processing " + request); } else { - s_logger.debug("SeqA " + attache.getId() + "-" + request.getSequence() + ": Processing " + request); + logger.debug("SeqA " + attache.getId() + "-" + request.getSequence() + ": Processing " + request); } } @@ -1212,7 +1209,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; final String reason = shutdown.getReason(); - s_logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " + + logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " + shutdown.getDetail()); if (reason.equals(ShutdownCommand.Update)) { //disconnectWithoutInvestigation(attache, Event.UpdateNeeded); @@ -1250,7 +1247,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId()); } } else { - s_logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId + + logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId + "; can't find the host in the DB"); } } @@ -1258,8 
+1255,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } else if (cmd instanceof ReadyAnswer) { final HostVO host = _hostDao.findById(attache.getId()); if (host == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cant not find host " + attache.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Cant not find host " + attache.getId()); } } answer = new Answer(cmd); @@ -1268,33 +1265,33 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } } catch (final Throwable th) { - s_logger.warn("Caught: ", th); + logger.warn("Caught: ", th); answer = new Answer(cmd, false, th.getMessage()); } answers[i] = answer; } final Response response = new Response(request, answers, _nodeId, attache.getId()); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (logD) { - s_logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); + logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); } else { - s_logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); + logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); } } try { link.send(response.toBytes()); } catch (final ClosedChannelException e) { - s_logger.warn("Unable to send response because connection is closed: " + response); + logger.warn("Unable to send response because connection is closed: " + response); } } protected void processResponse(final Link link, final Response response) { final AgentAttache attache = (AgentAttache)link.attachment(); if (attache == null) { - s_logger.warn("Unable to process: " + response); + logger.warn("Unable to process: " + response); } else if (!attache.processAnswers(response.getSequence(), response)) { - s_logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not processed: " + response); + 
logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not processed: " + response); } } @@ -1313,7 +1310,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl processRequest(task.getLink(), event); } } catch (final UnsupportedVersionException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); // upgradeAgent(task.getLink(), data, e.getReason()); } } else if (type == Task.Type.CONNECT) { @@ -1323,7 +1320,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (attache != null) { disconnectWithInvestigation(attache, Event.AgentDisconnected); } else { - s_logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done."); + logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done."); link.close(); link.terminated(); } @@ -1360,20 +1357,20 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { try { _agentStatusLock.lock(); - if (status_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { final ResourceState state = host.getResourceState(); final StringBuilder msg = new StringBuilder("Transition:"); msg.append("[Resource state = ").append(state); msg.append(", Agent event = ").append(e.toString()); msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]"); - status_logger.debug(msg); + logger.debug(msg); } host.setManagementServerId(msId); try { return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); } catch (final NoTransitionException e1) { - status_logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() + + logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() + ", mangement server id is 
" + msId); throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", mangement server id is " + msId + "," + e1.getMessage()); @@ -1404,7 +1401,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected boolean isHostOwnerSwitched(final long hostId) { final HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.warn("Can't find the host " + hostId); + logger.warn("Can't find the host " + hostId); return false; } return isHostOwnerSwitched(host); @@ -1429,7 +1426,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } else { /* Agent is still in connecting process, don't allow to disconnect right away */ if (tapLoadingAgents(hostId, TapAgentsAction.Contains)) { - s_logger.info("Host " + hostId + " is being loaded so no disconnects needed."); + logger.info("Host " + hostId + " is being loaded so no disconnects needed."); return; } @@ -1501,14 +1498,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public void pingBy(final long agentId) { // Update PingMap with the latest time if agent entry exists in the PingMap if (_pingMap.replace(agentId, InaccurateClock.getTimeInSeconds()) == null) { - s_logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap"); + logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap"); } } protected class MonitorTask extends ManagedContextRunnable { @Override protected void runInContext() { - s_logger.trace("Agent Monitor is started."); + logger.trace("Agent Monitor is started."); try { final List behindAgents = findAgentsBehindOnPing(); @@ -1524,17 +1521,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl * investigation and direct put agent to * Disconnected */ - status_logger.debug("Ping timeout but host " + agentId + " is in 
resource state of " + resourceState + ", so no investigation"); + logger.debug("Ping timeout but host " + agentId + " is in resource state of " + resourceState + ", so no investigation"); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { final HostVO host = _hostDao.findById(agentId); if (host != null && (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) { - s_logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId()); + logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId()); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { - status_logger.debug("Ping timeout for host " + agentId + ", do invstigation"); + logger.debug("Ping timeout for host " + agentId + ", do invstigation"); disconnectWithInvestigation(agentId, Event.PingTimeout); } } @@ -1555,10 +1552,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } } catch (final Throwable th) { - s_logger.error("Caught the following exception: ", th); + logger.error("Caught the following exception: ", th); } - s_logger.trace("Agent Monitor is leaving the building!"); + logger.trace("Agent Monitor is leaving the building!"); } protected List findAgentsBehindOnPing() { @@ -1571,7 +1568,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } if (agentsBehind.size() > 0) { - s_logger.info("Found the following agents behind on ping: " + agentsBehind); + logger.info("Found the following agents behind on ping: " + agentsBehind); } return agentsBehind; diff --git a/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index 2bc4f6859c5..440cfc39c77 100644 --- 
a/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -43,10 +43,6 @@ import javax.naming.ConfigurationException; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; -import org.apache.log4j.Logger; - -import com.google.gson.Gson; - import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -95,10 +91,10 @@ import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.nio.Link; import com.cloud.utils.nio.Task; +import com.google.gson.Gson; @Local(value = {AgentManager.class, ClusteredAgentRebalanceService.class}) public class ClusteredAgentManagerImpl extends AgentManagerImpl implements ClusterManagerListener, ClusteredAgentRebalanceService { - final static Logger s_logger = Logger.getLogger(ClusteredAgentManagerImpl.class); private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor")); private final long rebalanceTimeOut = 300000; // 5 mins - after this time remove the agent from the transfer list @@ -144,7 +140,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _sslEngines = new HashMap(7); _nodeId = ManagementServerNode.getManagementServerId(); - s_logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId); + logger.info("Configuring ClusterAgentManagerImpl. 
management server node id(msid): " + _nodeId); ClusteredAgentAttache.initialize(this); @@ -162,8 +158,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return false; } _timer.schedule(new DirectAgentScanTimerTask(), STARTUP_DELAY, ScanInterval.value()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds"); + if (logger.isDebugEnabled()) { + logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds"); } // Schedule tasks for agent rebalancing @@ -177,8 +173,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public void scheduleHostScanTask() { _timer.schedule(new DirectAgentScanTimerTask(), 0); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduled a direct agent scan task"); + if (logger.isDebugEnabled()) { + logger.debug("Scheduled a direct agent scan task"); } } @@ -187,8 +183,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } private void scanDirectAgentToLoad() { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Begin scanning directly connected hosts"); + if (logger.isTraceEnabled()) { + logger.trace("Begin scanning directly connected hosts"); } // for agents that are self-managed, threshold to be considered as disconnected after pingtimeout @@ -196,18 +192,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust List hosts = _hostDao.findAndUpdateDirectAgentToLoad(cutSeconds, LoadSize.value().longValue(), _nodeId); List appliances = _hostDao.findAndUpdateApplianceToLoad(cutSeconds, _nodeId); - if (hosts != null) { + if (hosts != null) { hosts.addAll(appliances); if (hosts.size() > 0) { - s_logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them..."); + logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing 
connect for them..."); for (HostVO host : hosts) { try { AgentAttache agentattache = findAttache(host.getId()); if (agentattache != null) { // already loaded, skip if (agentattache.forForward()) { - if (s_logger.isInfoEnabled()) { - s_logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host"); + if (logger.isInfoEnabled()) { + logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host"); } removeAgent(agentattache, Status.Disconnected); } else { @@ -215,18 +211,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")"); } loadDirectlyConnectedHost(host, false); } catch (Throwable e) { - s_logger.warn(" can not load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e); + logger.warn(" can not load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e); } } } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("End scanning directly connected hosts"); + if (logger.isTraceEnabled()) { + logger.trace("End scanning directly connected hosts"); } } @@ -236,7 +232,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { runDirectAgentScanTimerTask(); } catch (Throwable e) { - s_logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception " + e.getMessage(), e); } } } @@ -247,7 +243,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } protected AgentAttache createAttache(long id) { - s_logger.debug("create forwarding ClusteredAgentAttache for " + id); + logger.debug("create forwarding 
ClusteredAgentAttache for " + id); HostVO host = _hostDao.findById(id); final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); AgentAttache old = null; @@ -256,8 +252,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _agents.put(id, attache); } if (old != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Remove stale agent attache from current management server"); + if (logger.isDebugEnabled()) { + logger.debug("Remove stale agent attache from current management server"); } removeAgent(old, Status.Removed); } @@ -266,7 +262,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected AgentAttache createAttacheForConnect(HostVO host, Link link) { - s_logger.debug("create ClusteredAgentAttache for " + host.getId()); + logger.debug("create ClusteredAgentAttache for " + host.getId()); final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; @@ -282,7 +278,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected AgentAttache createAttacheForDirectConnect(Host host, ServerResource resource) { - s_logger.debug("create ClusteredDirectAgentAttache for " + host.getId()); + logger.debug("create ClusteredDirectAgentAttache for " + host.getId()); final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { @@ -326,8 +322,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public boolean executeUserRequest(long hostId, Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received agent disconnect event for host " + hostId); + if 
(logger.isDebugEnabled()) { + logger.debug("Received agent disconnect event for host " + hostId); } AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -336,7 +332,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust HostTransferMapVO transferVO = _hostTransferDao.findById(hostId); if (transferVO != null) { if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) { - s_logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " + + logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " + _nodeId); return true; } @@ -346,7 +342,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // don't process disconnect if the disconnect came for the host via delayed cluster notification, // but the host has already reconnected to the current management server if (!attache.forForward()) { - s_logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + + logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is directly connected to the current management server " + _nodeId); return true; } @@ -369,7 +365,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return result; } } catch (AgentUnavailableException e) { - s_logger.debug("cannot propagate agent reconnect because agent is not available", e); + logger.debug("cannot propagate agent reconnect because agent is not available", e); return false; } @@ -377,32 +373,27 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } public void notifyNodesInCluster(AgentAttache attache) { - s_logger.debug("Notifying other nodes of to disconnect"); + logger.debug("Notifying other nodes of to disconnect"); 
Command[] cmds = new Command[] {new ChangeAgentCommand(attache.getId(), Event.AgentDisconnected)}; _clusterMgr.broadcast(attache.getId(), _gson.toJson(cmds)); } // notifies MS peers to schedule a host scan task immediately, triggered during addHost operation public void notifyNodesInClusterToScheduleHostScanTask() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying other MS nodes to run host scan task"); + if (logger.isDebugEnabled()) { + logger.debug("Notifying other MS nodes to run host scan task"); } Command[] cmds = new Command[] {new ScheduleHostScanTaskCommand()}; _clusterMgr.broadcast(0, _gson.toJson(cmds)); } - protected static void logT(byte[] bytes, final String msg) { - s_logger.trace("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + + protected void logD(byte[] bytes, final String msg) { + logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg); } - protected static void logD(byte[] bytes, final String msg) { - s_logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + - (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg); - } - - protected static void logI(byte[] bytes, final String msg) { - s_logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + + protected void logI(byte[] bytes, final String msg) { + logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + (Request.isRequest(bytes) ? 
"Req: " : "Resp: ") + msg); } @@ -427,7 +418,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return false; } try { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { logD(bytes, "Routing to peer"); } Link.write(ch, new ByteBuffer[] {ByteBuffer.wrap(bytes)}, sslEngine); @@ -467,7 +458,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { ch.close(); } catch (IOException e) { - s_logger.warn("Unable to close peer socket connection to " + peerName); + logger.warn("Unable to close peer socket connection to " + peerName); } } _peers.remove(peerName); @@ -483,14 +474,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { prevCh.close(); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failed to get close resource for previous channel Socket: " + e.getLocalizedMessage()); } } if (ch == null || ch == prevCh) { ManagementServerHost ms = _clusterMgr.getPeer(peerName); if (ms == null) { - s_logger.info("Unable to find peer: " + peerName); + logger.info("Unable to find peer: " + peerName); return null; } String ip = ms.getServiceIP(); @@ -513,13 +504,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust sslEngine.setEnabledProtocols(SSLUtils.getSupportedProtocols(sslEngine.getEnabledProtocols())); Link.doHandshake(ch1, sslEngine, true); - s_logger.info("SSL: Handshake done"); + logger.info("SSL: Handshake done"); } catch (Exception e) { ch1.close(); throw new IOException("SSL: Fail to init SSL! 
" + e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip); + if (logger.isDebugEnabled()) { + logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip); } _peers.put(peerName, ch1); _sslEngines.put(peerName, sslEngine); @@ -528,15 +519,15 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { ch1.close(); } catch (IOException ex) { - s_logger.error("failed to close failed peer socket: " + ex); + logger.error("failed to close failed peer socket: " + ex); } - s_logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e); + logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e); return null; } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Found open channel for peer: " + peerName); + if (logger.isTraceEnabled()) { + logger.trace("Found open channel for peer: " + peerName); } return ch; } @@ -562,8 +553,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache"); + if (logger.isDebugEnabled()) { + logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache"); } agent = createAttache(hostId); } @@ -582,10 +573,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (_peers != null) { for (SocketChannel ch : _peers.values()) { try { - s_logger.info("Closing: " + ch.toString()); + logger.info("Closing: " + ch.toString()); ch.close(); } catch (IOException e) { - s_logger.info("[ignored] error on closing channel: 
" +ch.toString(), e); + logger.info("[ignored] error on closing channel: " +ch.toString(), e); } } } @@ -622,7 +613,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final byte[] data = task.getData(); Version ver = Request.getVersion(data); if (ver.ordinal() != Version.v1.ordinal() && ver.ordinal() != Version.v3.ordinal()) { - s_logger.warn("Wrong version for clustered agent request"); + logger.warn("Wrong version for clustered agent request"); super.doTask(task); return; } @@ -642,7 +633,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust Request req = Request.parse(data); Command[] cmds = req.getCommands(); CancelCommand cancel = (CancelCommand)cmds[0]; - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { logD(data, "Cancel request received"); } agent.cancel(cancel.getSequence()); @@ -690,7 +681,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust AgentAttache attache = (AgentAttache)link.attachment(); if (attache != null) { attache.sendNext(Request.getSequence(data)); - } else if (s_logger.isDebugEnabled()) { + } else if (logger.isDebugEnabled()) { logD(data, "No attache to process " + Request.parse(data).toString()); } } @@ -703,11 +694,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final Response response = Response.parse(data); AgentAttache attache = findAttache(response.getAgentId()); if (attache == null) { - s_logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString()); + logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString()); return; } if (!attache.processAnswers(response.getSequence(), response)) { - s_logger.info("SeqA " + attache.getId() + "-" + response.getSequence() + ": Response is not processed: " + response.toString()); + 
logger.info("SeqA " + attache.getId() + "-" + response.getSequence() + ": Response is not processed: " + response.toString()); } } return; @@ -726,10 +717,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public void onManagementNodeLeft(List nodeList, long selfNodeId) { for (ManagementServerHost vo : nodeList) { - s_logger.info("Marking hosts as disconnected on Management server" + vo.getMsid()); + logger.info("Marking hosts as disconnected on Management server" + vo.getMsid()); long lastPing = (System.currentTimeMillis() >> 10) - getTimeout(); _hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing); - s_logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid()); + logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid()); cleanupTransferMap(vo.getMsid()); } } @@ -757,7 +748,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); } catch (Exception e) { - s_logger.warn("Unable to rebalance host id=" + agentId, e); + logger.warn("Unable to rebalance host id=" + agentId, e); } } return result; @@ -772,14 +763,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected volatile boolean cancelled = false; public AgentLoadBalancerTask() { - s_logger.debug("Agent load balancer task created"); + logger.debug("Agent load balancer task created"); } @Override public synchronized boolean cancel() { if (!cancelled) { cancelled = true; - s_logger.debug("Agent load balancer task cancelled"); + logger.debug("Agent load balancer task cancelled"); return super.cancel(); } return true; @@ -790,19 +781,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { if (!cancelled) { startRebalanceAgents(); - if (s_logger.isInfoEnabled()) { - s_logger.info("The agent load balancer task is now 
being cancelled"); + if (logger.isInfoEnabled()) { + logger.info("The agent load balancer task is now being cancelled"); } cancelled = true; } } catch (Throwable e) { - s_logger.error("Unexpected exception " + e.toString(), e); + logger.error("Unexpected exception " + e.toString(), e); } } } public void startRebalanceAgents() { - s_logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents"); + logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents"); List allMS = _mshostDao.listBy(ManagementServerHost.State.Up); QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getManagementServerId(), Op.NNULL); @@ -814,16 +805,16 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) { avLoad = allManagedAgents.size() / allMS.size(); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() + + if (logger.isDebugEnabled()) { + logger.debug("There are no hosts to rebalance in the system. 
Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is " + allManagedAgents.size()); } return; } if (avLoad == 0L) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("As calculated average load is less than 1, rounding it to 1"); + if (logger.isDebugEnabled()) { + logger.debug("As calculated average load is less than 1, rounding it to 1"); } avLoad = 1; } @@ -837,19 +828,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { break; } else { - s_logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); + logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); } } if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { - s_logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid()); + logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid()); for (HostVO host : hostsToRebalance) { long hostId = host.getId(); - s_logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId); + logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId); boolean result = true; if (_hostTransferDao.findById(hostId) != null) { - s_logger.warn("Somebody else is already rebalancing host id: " + hostId); + logger.warn("Somebody else is already rebalancing host id: " + hostId); continue; } @@ -858,18 +849,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId); Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, 
Event.RequestAgentRebalance); if (answer == null) { - s_logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid()); + logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid()); result = false; } } catch (Exception ex) { - s_logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex); + logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex); result = false; } finally { if (transfer != null) { HostTransferMapVO transferState = _hostTransferDao.findByIdAndFutureOwnerId(transfer.getId(), _nodeId); if (!result && transferState != null && transferState.getState() == HostTransferState.TransferRequested) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode"); + if (logger.isDebugEnabled()) { + logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode"); } // just remove the mapping (if exists) as nothing was done on the peer management // server yet @@ -879,7 +870,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } } else { - s_logger.debug("Found no hosts to rebalance from the management server " + node.getMsid()); + logger.debug("Found no hosts to rebalance from the management server " + node.getMsid()); } } } @@ -893,8 +884,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust Command[] cmds = commands.toCommands(); try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Forwarding " + cmds[0].toString() + " to " + peer); + if (logger.isDebugEnabled()) { + logger.debug("Forwarding " + cmds[0].toString() + " to " + peer); } String peerName = Long.toString(peer); String cmdStr = _gson.toJson(cmds); @@ -902,7 +893,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust Answer[] answers = _gson.fromJson(ansStr, 
Answer[].class); return answers; } catch (Exception e) { - s_logger.warn("Caught exception while talking to " + currentOwnerId, e); + logger.warn("Caught exception while talking to " + currentOwnerId, e); return null; } } @@ -926,8 +917,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); + if (logger.isDebugEnabled()) { + logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); } Command[] cmds = new Command[1]; cmds[0] = new ChangeAgentCommand(agentId, event); @@ -939,8 +930,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust Answer[] answers = _gson.fromJson(ansStr, Answer[].class); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Result for agent change is " + answers[0].getResult()); + if (logger.isDebugEnabled()) { + logger.debug("Result for agent change is " + answers[0].getResult()); } return answers[0].getResult(); @@ -951,12 +942,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected void runInContext() { try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId); + if (logger.isTraceEnabled()) { + logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId); } synchronized (_agentToTransferIds) { if (_agentToTransferIds.size() > 0) { - s_logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer"); + logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer"); // for (Long hostId : _agentToTransferIds) { for (Iterator iterator = _agentToTransferIds.iterator(); iterator.hasNext();) { Long hostId = iterator.next(); @@ -973,14 +964,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust 
_hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut)); if (transferMap == null) { - s_logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host"); + logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host"); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; } if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { - s_logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host"); + logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host"); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -988,7 +979,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner()); if (ms != null && ms.getState() != ManagementServerHost.State.Up) { - s_logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + + logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host"); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); @@ -1000,31 +991,31 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner())); } catch (RejectedExecutionException ex) { - s_logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution"); + logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution"); continue; } } else { - s_logger.debug("Agent " + hostId + " can't be 
transfered yet as its request queue size is " + attache.getQueueSize() + + logger.debug("Agent " + hostId + " can't be transfered yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " + attache.getNonRecurringListenersSize()); } } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Found no agents to be transfered by the management server " + _nodeId); + if (logger.isTraceEnabled()) { + logger.trace("Found no agents to be transfered by the management server " + _nodeId); } } } } catch (Throwable e) { - s_logger.error("Problem with the clustered agent transfer scan check!", e); + logger.error("Problem with the clustered agent transfer scan check!", e); } } }; } private boolean setToWaitForRebalance(final long hostId, long currentOwnerId, long futureOwnerId) { - s_logger.debug("Adding agent " + hostId + " to the list of agents to transfer"); + logger.debug("Adding agent " + hostId + " to the list of agents to transfer"); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); } @@ -1035,7 +1026,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust boolean result = true; if (currentOwnerId == _nodeId) { if (!startRebalance(hostId)) { - s_logger.debug("Failed to start agent rebalancing"); + logger.debug("Failed to start agent rebalancing"); finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed); return false; } @@ -1046,23 +1037,23 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } catch (Exception ex) { - s_logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex); + logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex); result = false; } if (result) { - s_logger.debug("Successfully transfered host id=" + hostId + " to management server " + futureOwnerId); + 
logger.debug("Successfully transfered host id=" + hostId + " to management server " + futureOwnerId); finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted); } else { - s_logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId); + logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId); finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed); } } else if (futureOwnerId == _nodeId) { HostVO host = _hostDao.findById(hostId); try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); + if (logger.isDebugEnabled()) { + logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); } AgentAttache attache = findAttache(hostId); @@ -1071,26 +1062,26 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (result) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + + if (logger.isDebugEnabled()) { + logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); } result = loadDirectlyConnectedHost(host, true); } else { - s_logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); + logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); } } catch (Exception ex) { - s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + + logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") 
to the management server " + _nodeId + " as a part of rebalance process due to:", ex); result = false; } if (result) { - s_logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + + logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); } else { - s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + + logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); } } @@ -1101,13 +1092,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected void finishRebalance(final long hostId, long futureOwnerId, Event event) { boolean success = (event == Event.RebalanceCompleted) ? 
true : false; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event); + if (logger.isDebugEnabled()) { + logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event); } AgentAttache attache = findAttache(hostId); if (attache == null || !(attache instanceof ClusteredAgentAttache)) { - s_logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already"); + logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already"); _hostTransferDao.completeAgentTransfer(hostId); return; } @@ -1122,7 +1113,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // 2) Get all transfer requests and route them to peer Request requestToTransfer = forwardAttache.getRequestToTransfer(); while (requestToTransfer != null) { - s_logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " + + logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " + _nodeId + " to " + futureOwnerId); boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes()); if (!routeResult) { @@ -1132,23 +1123,23 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust requestToTransfer = forwardAttache.getRequestToTransfer(); } - s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId); + logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId); } else { failRebalance(hostId); } - s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance"); + logger.debug("Management server " + _nodeId + " completed agent " + 
hostId + " rebalance"); _hostTransferDao.completeAgentTransfer(hostId); } protected void failRebalance(final long hostId) { try { - s_logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId); + logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId); _hostTransferDao.completeAgentTransfer(hostId); handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true); } catch (Exception ex) { - s_logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup"); + logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup"); } } @@ -1156,7 +1147,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust HostVO host = _hostDao.findById(hostId); if (host == null || host.getRemoved() != null) { - s_logger.warn("Unable to find host record, fail start rebalancing process"); + logger.warn("Unable to find host record, fail start rebalancing process"); return false; } @@ -1166,17 +1157,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true); ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId); if (forwardAttache == null) { - s_logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process"); + logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process"); return false; } - s_logger.debug("Putting agent id=" + hostId + " to transfer mode"); + logger.debug("Putting agent id=" + hostId + " to transfer mode"); forwardAttache.setTransferMode(true); _agents.put(hostId, forwardAttache); } else { if (attache == null) { - s_logger.warn("Attache for the agent " + hostId + " no longer exists on management server " + _nodeId + ", can't start host rebalancing"); + 
logger.warn("Attache for the agent " + hostId + " no longer exists on management server " + _nodeId + ", can't start host rebalancing"); } else { - s_logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " + + logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " + attache.getNonRecurringListenersSize() + ", can't start host rebalancing"); } return false; @@ -1213,19 +1204,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected void runInContext() { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Rebalancing host id=" + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Rebalancing host id=" + hostId); } rebalanceHost(hostId, currentOwnerId, futureOwnerId); } catch (Exception e) { - s_logger.warn("Unable to rebalance host id=" + hostId, e); + logger.warn("Unable to rebalance host id=" + hostId, e); } } } private String handleScheduleHostScanTaskCommand(ScheduleHostScanTaskCommand cmd) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd)); + if (logger.isDebugEnabled()) { + logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd)); } try { @@ -1233,7 +1224,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } catch (Exception e) { // Scheduling host scan task in peer MS is a best effort operation during host add, regular host scan // happens at fixed intervals anyways. 
So handling any exceptions that may be thrown - s_logger.warn("Exception happened while trying to schedule host scan task on mgmt server " + _clusterMgr.getSelfPeerName() + + logger.warn("Exception happened while trying to schedule host scan task on mgmt server " + _clusterMgr.getSelfPeerName() + ", ignoring as regular host scan happens at fixed interval anyways", e); return null; } @@ -1260,8 +1251,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public String dispatch(ClusterServicePdu pdu) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); + if (logger.isDebugEnabled()) { + logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); } Command[] cmds = null; @@ -1269,24 +1260,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust cmds = _gson.fromJson(pdu.getJsonPackage(), Command[].class); } catch (Throwable e) { assert (false); - s_logger.error("Excection in gson decoding : ", e); + logger.error("Excection in gson decoding : ", e); } if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) { // intercepted ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0]; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); + if (logger.isDebugEnabled()) { + logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); } boolean result = false; try { result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Result is " + result); + if (logger.isDebugEnabled()) { + logger.debug("Result is " + result); } } catch (AgentUnavailableException e) { - s_logger.warn("Agent is unavailable", e); + logger.warn("Agent is unavailable", e); return null; } @@ -1296,21 +1287,21 @@ public class 
ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) { TransferAgentCommand cmd = (TransferAgentCommand)cmds[0]; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); + if (logger.isDebugEnabled()) { + logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); } boolean result = false; try { result = rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Result is " + result); + if (logger.isDebugEnabled()) { + logger.debug("Result is " + result); } } catch (AgentUnavailableException e) { - s_logger.warn("Agent is unavailable", e); + logger.warn("Agent is unavailable", e); return null; } catch (OperationTimedoutException e) { - s_logger.warn("Operation timed out", e); + logger.warn("Operation timed out", e); return null; } Answer[] answers = new Answer[1]; @@ -1319,14 +1310,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) { PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0]; - s_logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId()); + logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId()); boolean result = false; try { result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent()); - s_logger.debug("Result is " + result); + logger.debug("Result is " + result); } catch (AgentUnavailableException ex) { - s_logger.warn("Agent is unavailable", ex); + logger.warn("Agent is unavailable", ex); return null; } @@ -1341,30 +1332,30 @@ public class ClusteredAgentManagerImpl 
extends AgentManagerImpl implements Clust try { long startTick = System.currentTimeMillis(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); + if (logger.isDebugEnabled()) { + logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); } Answer[] answers = sendToAgent(pdu.getAgentId(), cmds, pdu.isStopOnError()); if (answers != null) { String jsonReturn = _gson.toJson(answers); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + + if (logger.isDebugEnabled()) { + logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return result: " + jsonReturn); } return jsonReturn; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + + if (logger.isDebugEnabled()) { + logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return null result"); } } } catch (AgentUnavailableException e) { - s_logger.warn("Agent is unavailable", e); + logger.warn("Agent is unavailable", e); } catch (OperationTimedoutException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } return null; @@ -1389,8 +1380,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected void runInContext() { try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Agent rebalance task check, management server id:" + _nodeId); + if (logger.isTraceEnabled()) { + logger.trace("Agent rebalance task check, management server id:" + _nodeId); } // initiate agent lb task will be scheduled and executed only once, and only when number of agents // loaded exceeds 
_connectedAgentsThreshold @@ -1408,18 +1399,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (allHostsCount > 0.0) { double load = managedHostsCount / allHostsCount; if (load >= ConnectedAgentThreshold.value()) { - s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + + logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value()); scheduleRebalanceAgents(); _agentLbHappened = true; } else { - s_logger.debug("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + + logger.debug("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + ConnectedAgentThreshold.value()); } } } } catch (Throwable e) { - s_logger.error("Problem with the clustered agent transfer scan check!", e); + logger.error("Problem with the clustered agent transfer scan check!", e); } } }; @@ -1428,13 +1419,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public void rescan() { // schedule a scan task immediately - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduling a host scan task"); + if (logger.isDebugEnabled()) { + logger.debug("Scheduling a host scan task"); } // schedule host scan task on current MS scheduleHostScanTask(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying all peer MS to schedule host scan task"); + if (logger.isDebugEnabled()) { + logger.debug("Notifying all peer MS to schedule host scan task"); } } diff --git a/engine/orchestration/src/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java b/engine/orchestration/src/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java index 254f1bf0bbb..f5c699b471b 100644 --- 
a/engine/orchestration/src/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java @@ -27,7 +27,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.Host; @@ -41,7 +40,6 @@ import com.cloud.utils.db.SearchCriteria.Op; @Component @Local(value = AgentLoadBalancerPlanner.class) public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements AgentLoadBalancerPlanner { - private static final Logger s_logger = Logger.getLogger(AgentLoadBalancerPlanner.class); @Inject HostDao _hostDao = null; @@ -54,7 +52,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements List allHosts = sc.list(); if (allHosts.size() <= avLoad) { - s_logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad + + logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad + "; so it doesn't participate in agent rebalancing process"); return null; } @@ -66,7 +64,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements List directHosts = sc.list(); if (directHosts.isEmpty()) { - s_logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId + + logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId + "; so it doesn't participate in agent rebalancing process"); return null; } @@ -92,23 +90,23 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements int hostsLeft = directHosts.size(); List hostsToReturn = new ArrayList(); - s_logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " 
+ allHosts.size() + + logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() + " and the average agent load in the system is " + avLoad + "; finalyzing list of hosts to give away..."); for (Long cluster : hostToClusterMap.keySet()) { List hostsInCluster = hostToClusterMap.get(cluster); hostsLeft = hostsLeft - hostsInCluster.size(); if (hostsToReturn.size() < hostsToGive) { - s_logger.debug("Trying cluster id=" + cluster); + logger.debug("Trying cluster id=" + cluster); if (hostsInCluster.size() > hostsLeftToGive) { - s_logger.debug("Skipping cluster id=" + cluster + " as it has more hosts than we need: " + hostsInCluster.size() + " vs " + hostsLeftToGive); + logger.debug("Skipping cluster id=" + cluster + " as it has more hosts than we need: " + hostsInCluster.size() + " vs " + hostsLeftToGive); if (hostsLeft >= hostsLeftToGive) { continue; } else { break; } } else { - s_logger.debug("Taking all " + hostsInCluster.size() + " hosts: " + hostsInCluster + " from cluster id=" + cluster); + logger.debug("Taking all " + hostsInCluster.size() + " hosts: " + hostsInCluster + " from cluster id=" + cluster); hostsToReturn.addAll(hostsInCluster); hostsLeftToGive = hostsLeftToGive - hostsInCluster.size(); } @@ -117,7 +115,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements } } - s_logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts"); + logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts"); return hostsToReturn; } diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index 9dea90a41d1..7d373a29fd8 100644 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -67,7 +67,6 @@ import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -206,7 +205,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Local(value = VirtualMachineManager.class) public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, VmWorkJobHandler, Listener, Configurable { - private static final Logger s_logger = Logger.getLogger(VirtualMachineManagerImpl.class); public static final String VM_WORK_JOB_HANDLER = VirtualMachineManagerImpl.class.getSimpleName(); @@ -382,8 +380,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); final Account owner = _entityMgr.findById(Account.class, vm.getAccountId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocating entries for VM: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Allocating entries for VM: " + vm); } vm.setDataCenterId(plan.getDataCenterId()); @@ -398,8 +396,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocating nics for " + vmFinal); + if (logger.isDebugEnabled()) { + logger.debug("Allocating nics for " + vmFinal); } try { @@ -408,8 +406,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e); } - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Allocating disks for " + vmFinal); + if (logger.isDebugEnabled()) { + logger.debug("Allocating disks for " + vmFinal); } if (template.getFormat() == ImageFormat.ISO) { @@ -431,8 +429,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } }); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocation completed for VM: " + vmFinal); + if (logger.isDebugEnabled()) { + logger.debug("Allocation completed for VM: " + vmFinal); } } @@ -468,8 +466,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException { if (vm == null || vm.getRemoved() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find vm or vm is destroyed: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find vm or vm is destroyed: " + vm); } return; } @@ -479,28 +477,28 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) { - s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm); + logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm); throw new CloudRuntimeException("Unable to destroy " + vm); } } catch (final NoTransitionException e) { - s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm); + logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm); throw new CloudRuntimeException("Unable to destroy " + vm, e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Destroying vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Destroying vm " + vm); } final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); final HypervisorGuru hvGuru = 
_hvGuruMgr.getGuru(vm.getHypervisorType()); - s_logger.debug("Cleaning up NICS"); + logger.debug("Cleaning up NICS"); final List nicExpungeCommands = hvGuru.finalizeExpungeNics(vm, profile.getNics()); _networkMgr.cleanupNics(profile); - s_logger.debug("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage"); + logger.debug("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage"); final List volumeExpungeCommands = hvGuru.finalizeExpungeVolumes(vm); @@ -518,7 +516,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (!cmds.isSuccessful()) { for (final Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { - s_logger.warn("Failed to expunge vm due to: " + answer.getDetails()); + logger.warn("Failed to expunge vm due to: " + answer.getDetails()); throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails()); } @@ -555,7 +553,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (!cmds.isSuccessful()) { for (final Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { - s_logger.warn("Failed to expunge vm due to: " + answer.getDetails()); + logger.warn("Failed to expunge vm due to: " + answer.getDetails()); throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails()); } } @@ -563,8 +561,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expunged " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Expunged " + vm); } } @@ -633,15 +631,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac while (true) { final ItWorkVO vo = _workDao.findByOutstandingWork(vm.getId(), state); if (vo == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find work for VM: " + vm + " and state: " + state); + if (logger.isDebugEnabled()) { 
+ logger.debug("Unable to find work for VM: " + vm + " and state: " + state); } return true; } if (vo.getStep() == Step.Done) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Work for " + vm + " is " + vo.getStep()); + if (logger.isDebugEnabled()) { + logger.debug("Work for " + vm + " is " + vo.getStep()); } return true; } @@ -649,24 +647,24 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // also check DB to get latest VM state to detect vm update from concurrent process before idle waiting to get an early exit final VMInstanceVO instance = _vmDao.findById(vm.getId()); if (instance != null && instance.getState() == State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is already started in DB: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is already started in DB: " + vm); } return true; } if (vo.getSecondsTaskIsInactive() > VmOpCancelInterval.value()) { - s_logger.warn("The task item for vm " + vm + " has been inactive for " + vo.getSecondsTaskIsInactive()); + logger.warn("The task item for vm " + vm + " has been inactive for " + vo.getSecondsTaskIsInactive()); return false; } try { Thread.sleep(VmOpWaitInterval.value()*1000); } catch (final InterruptedException e) { - s_logger.info("Waiting for " + vm + " but is interrupted"); + logger.info("Waiting for " + vm + " but is interrupted"); throw new ConcurrentOperationException("Waiting for " + vm + " but is interrupted"); } - s_logger.debug("Waiting some more to make sure there's no activity on " + vm); + logger.debug("Waiting some more to make sure there's no activity on " + vm); } } @@ -685,13 +683,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackWithException, NoTransitionException>() { @Override public Ternary doInTransaction(final TransactionStatus status) throws NoTransitionException { - final Journal journal = new Journal.LogJournal("Creating " + vm, 
s_logger); + final Journal journal = new Journal.LogJournal("Creating " + vm, logger); final ItWorkVO work = _workDao.persist(workFinal); final ReservationContextImpl context = new ReservationContextImpl(work.getId(), journal, caller, account); if (stateTransitTo(vm, Event.StartRequested, null, work.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); } return new Ternary(vm, context, work); } @@ -705,8 +703,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return result; } } catch (final NoTransitionException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to transition into Starting state due to " + e.getMessage()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to transition into Starting state due to " + e.getMessage()); } } @@ -715,14 +713,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new ConcurrentOperationException("Unable to acquire lock on " + vm); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Determining why we're unable to update the state to Starting for " + instance + ". Retry=" + retry); + if (logger.isDebugEnabled()) { + logger.debug("Determining why we're unable to update the state to Starting for " + instance + ". 
Retry=" + retry); } final State state = instance.getState(); if (state == State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is already started: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is already started: " + vm); } return null; } @@ -736,7 +734,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if (state != State.Stopped) { - s_logger.debug("VM " + vm + " is not in a state to be started: " + state); + logger.debug("VM " + vm + " is not in a state to be started: " + state); return null; } } @@ -846,13 +844,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()); final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn()); + if (logger.isDebugEnabled()) { + logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn()); } DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx); if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + + if (logger.isDebugEnabled()) { + logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId()); } plan = @@ -873,8 +871,8 @@ public class VirtualMachineManagerImpl extends ManagerBase 
implements VirtualMac if (avoids == null) { avoids = new ExcludeList(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); + if (logger.isDebugEnabled()) { + logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); } boolean planChangedByVolume = false; @@ -893,16 +891,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // reassign pool for the volume even if it ready. final Long volTemplateId = vol.getTemplateId(); if (volTemplateId != null && volTemplateId.longValue() != template.getId()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool"); + if (logger.isDebugEnabled()) { + logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool"); } continue; } final StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId()); if (!pool.isInMaintenance()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Root volume is ready, need to place VM in volume's cluster"); + if (logger.isDebugEnabled()) { + logger.debug("Root volume is ready, need to place VM in volume's cluster"); } final long rootVolDcId = pool.getDataCenterId(); final Long rootVolPodId = pool.getPodId(); @@ -913,8 +911,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) { // cannot satisfy the plan passed in to the // planner - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. 
volume's cluster: " + + if (logger.isDebugEnabled()) { + logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + rootVolClusterId + ", cluster specified: " + clusterIdSpecified); } throw new ResourceUnavailableException( @@ -927,8 +925,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac planToDeploy.getHostId(), vol.getPoolId(), null, ctx); } else { plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx); - if (s_logger.isDebugEnabled()) { - s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + + if (logger.isDebugEnabled()) { + logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId); } planChangedByVolume = true; @@ -943,7 +941,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner); } catch (final AffinityConflictException e2) { - s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2); + logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2); throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict"); } @@ -992,8 +990,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn()); + if (logger.isDebugEnabled()) { + logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn()); } _networkMgr.prepare(vmProfile, dest, ctx); if (vm.getHypervisorType() != HypervisorType.BareMetal) { @@ -1054,13 +1052,13 @@ public class VirtualMachineManagerImpl 
extends ManagerBase implements VirtualMac } startedVm = vm; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Start completed for VM " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Start completed for VM " + vm); } return; } else { - if (s_logger.isDebugEnabled()) { - s_logger.info("The guru did not like the answers so stopping " + vm); + if (logger.isDebugEnabled()) { + logger.info("The guru did not like the answers so stopping " + vm); } final StopCommand cmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false); @@ -1078,49 +1076,49 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers")); + logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers")); _haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop); throw new ExecutionException("Unable to stop " + vm + " so we are unable to retry the start operation"); } throw new ExecutionException("Unable to start " + vm + " due to error in finalizeStart, not retrying"); } } - s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails())); + logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? 
" no start answer" : startAnswer.getDetails())); if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) { break; } } catch (final OperationTimedoutException e) { - s_logger.debug("Unable to send the start command to host " + dest.getHost()); + logger.debug("Unable to send the start command to host " + dest.getHost()); if (e.isActive()) { _haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop); } canRetry = false; throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e); } catch (final ResourceUnavailableException e) { - s_logger.info("Unable to contact resource.", e); + logger.info("Unable to contact resource.", e); if (!avoids.add(e)) { if (e.getScope() == Volume.class || e.getScope() == Nic.class) { throw e; } else { - s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e); + logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e); throw e; } } } catch (final InsufficientCapacityException e) { - s_logger.info("Insufficient capacity ", e); + logger.info("Insufficient capacity ", e); if (!avoids.add(e)) { if (e.getScope() == Volume.class || e.getScope() == Nic.class) { throw e; } else { - s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e); + logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e); } } } catch (final ExecutionException e) { - s_logger.error("Failed to start instance " + vm, e); + logger.error("Failed to start instance " + vm, e); throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e); } catch (final NoTransitionException e) { - s_logger.error("Failed to start instance " + vm, e); + logger.error("Failed to start instance " + vm, e); throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e); } finally { if (startedVm == null && canRetry) { @@ -1277,13 +1275,13 @@ public 
class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if (!answer.getResult()) { final String details = answer.getDetails(); - s_logger.debug("Unable to stop VM due to " + details); + logger.debug("Unable to stop VM due to " + details); return false; } guru.finalizeStop(profile, answer); } else { - s_logger.error("Invalid answer received in response to a StopCommand for " + vm.getInstanceName()); + logger.error("Invalid answer received in response to a StopCommand for " + vm.getInstanceName()); return false; } @@ -1303,33 +1301,33 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac protected boolean cleanup(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final ItWorkVO work, final Event event, final boolean cleanUpEvenIfUnableToStop) { final VirtualMachine vm = profile.getVirtualMachine(); final State state = vm.getState(); - s_logger.debug("Cleaning up resources for the vm " + vm + " in " + state + " state"); + logger.debug("Cleaning up resources for the vm " + vm + " in " + state + " state"); try { if (state == State.Starting) { if (work != null) { final Step step = work.getStep(); if (step == Step.Starting && !cleanUpEvenIfUnableToStop) { - s_logger.warn("Unable to cleanup vm " + vm + "; work state is incorrect: " + step); + logger.warn("Unable to cleanup vm " + vm + "; work state is incorrect: " + step); return false; } if (step == Step.Started || step == Step.Starting || step == Step.Release) { if (vm.getHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process"); return false; } } } if (step != Step.Release && step != Step.Prepare && step != Step.Started && step != Step.Starting) { - s_logger.debug("Cleanup is not needed for vm " + vm + "; work 
state is incorrect: " + step); + logger.debug("Cleanup is not needed for vm " + vm + "; work state is incorrect: " + step); return true; } } else { if (vm.getHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process"); return false; } } @@ -1338,39 +1336,39 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } else if (state == State.Stopping) { if (vm.getHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Stopping + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Stopping + " state as a part of cleanup process"); return false; } } } else if (state == State.Migrating) { if (vm.getHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process"); return false; } } if (vm.getLastHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process"); return false; } } } else if (state == State.Running) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Running + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Running + " state as a part of cleanup process"); return false; 
} } } finally { try { _networkMgr.release(profile, cleanUpEvenIfUnableToStop); - s_logger.debug("Successfully released network resources for the vm " + vm); + logger.debug("Successfully released network resources for the vm " + vm); } catch (final Exception e) { - s_logger.warn("Unable to release some network resources.", e); + logger.warn("Unable to release some network resources.", e); } volumeMgr.release(profile); - s_logger.debug("Successfully cleanued up resources for the vm " + vm + " in " + state + " state"); + logger.debug("Successfully cleanued up resources for the vm " + vm + " in " + state + " state"); } return true; @@ -1433,42 +1431,42 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac ConcurrentOperationException { final State state = vm.getState(); if (state == State.Stopped) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is already stopped: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is already stopped: " + vm); } return; } if (state == State.Destroyed || state == State.Expunging || state == State.Error) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Stopped called on " + vm + " but the state is " + state); + if (logger.isDebugEnabled()) { + logger.debug("Stopped called on " + vm + " but the state is " + state); } return; } // grab outstanding work item if any final ItWorkVO work = _workDao.findByOutstandingWork(vm.getId(), vm.getState()); if (work != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found an outstanding work item for this vm " + vm + " with state:" + vm.getState() + ", work id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found an outstanding work item for this vm " + vm + " with state:" + vm.getState() + ", work id:" + work.getId()); } } final Long hostId = vm.getHostId(); if (hostId == null) { if (!cleanUpEvenIfUnableToStop) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("HostId is null but this is not a forced stop, 
cannot stop vm " + vm + " with state:" + vm.getState()); + if (logger.isDebugEnabled()) { + logger.debug("HostId is null but this is not a forced stop, cannot stop vm " + vm + " with state:" + vm.getState()); } throw new CloudRuntimeException("Unable to stop " + vm); } try { stateTransitTo(vm, Event.AgentReportStopped, null, null); } catch (final NoTransitionException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); } // mark outstanding work item if any as done if (work != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating work item to Done, id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Updating work item to Done, id:" + work.getId()); } work.setStep(Step.Done); _workDao.update(work.getId(), work); @@ -1488,26 +1486,26 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new CloudRuntimeException("We cannot stop " + vm + " when it is in state " + vm.getState()); } final boolean doCleanup = true; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to transition the state but we're moving on because it's forced stop"); + if (logger.isDebugEnabled()) { + logger.debug("Unable to transition the state but we're moving on because it's forced stop"); } if (doCleanup) { if (cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.StopRequested, cleanUpEvenIfUnableToStop)) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating work item to Done, id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Updating work item to Done, id:" + work.getId()); } if (!changeState(vm, Event.AgentReportStopped, null, work, Step.Done)) { throw new CloudRuntimeException("Unable to stop " + vm); } } catch (final NoTransitionException e) { - s_logger.warn("Unable to cleanup " + vm); + logger.warn("Unable to cleanup " + vm); throw new CloudRuntimeException("Unable to stop " + vm, e); } } else { - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Failed to cleanup VM: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Failed to cleanup VM: " + vm); } throw new CloudRuntimeException("Failed to cleanup " + vm + " , current state " + vm.getState()); } @@ -1553,50 +1551,50 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } catch (final AgentUnavailableException e) { - s_logger.warn("Unable to stop vm, agent unavailable: " + e.toString()); + logger.warn("Unable to stop vm, agent unavailable: " + e.toString()); } catch (final OperationTimedoutException e) { - s_logger.warn("Unable to stop vm, operation timed out: " + e.toString()); + logger.warn("Unable to stop vm, operation timed out: " + e.toString()); } finally { if (!stopped) { if (!cleanUpEvenIfUnableToStop) { - s_logger.warn("Unable to stop vm " + vm); + logger.warn("Unable to stop vm " + vm); try { stateTransitTo(vm, Event.OperationFailed, vm.getHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unable to transition the state " + vm); + logger.warn("Unable to transition the state " + vm); } throw new CloudRuntimeException("Unable to stop " + vm); } else { - s_logger.warn("Unable to actually stop " + vm + " but continue with release because it's a force stop"); + logger.warn("Unable to actually stop " + vm + " but continue with release because it's a force stop"); vmGuru.finalizeStop(profile, answer); } } } - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " is stopped on the host. Proceeding to release resource held."); + if (logger.isDebugEnabled()) { + logger.debug(vm + " is stopped on the host. 
Proceeding to release resource held."); } try { _networkMgr.release(profile, cleanUpEvenIfUnableToStop); - s_logger.debug("Successfully released network resources for the vm " + vm); + logger.debug("Successfully released network resources for the vm " + vm); } catch (final Exception e) { - s_logger.warn("Unable to release some network resources.", e); + logger.warn("Unable to release some network resources.", e); } try { if (vm.getHypervisorType() != HypervisorType.BareMetal) { volumeMgr.release(profile); - s_logger.debug("Successfully released storage resources for the vm " + vm); + logger.debug("Successfully released storage resources for the vm " + vm); } } catch (final Exception e) { - s_logger.warn("Unable to release storage resources.", e); + logger.warn("Unable to release storage resources.", e); } try { if (work != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating the outstanding work item to Done, id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Updating the outstanding work item to Done, id:" + work.getId()); } work.setStep(Step.Done); _workDao.update(work.getId(), work); @@ -1606,7 +1604,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new CloudRuntimeException("unable to stop " + vm); } } catch (final NoTransitionException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); throw new CloudRuntimeException("Unable to stop " + vm); } } @@ -1622,7 +1620,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // hacking it here at general VM manager /* if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) { - s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks"); + logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks"); return false; } */ @@ -1638,7 +1636,7 @@ public class 
VirtualMachineManagerImpl extends ManagerBase implements VirtualMac * Remove the hacking logic here. // if there are active vm snapshots task, state change is not allowed if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) { - s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks"); + logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks"); return false; } */ @@ -1660,20 +1658,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac public void destroy(final String vmUuid) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find vm or vm is destroyed: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find vm or vm is destroyed: " + vm); } return; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Destroying vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Destroying vm " + vm); } advanceStop(vmUuid, VmDestroyForcestop.value()); if (!_vmSnapshotMgr.deleteAllVMSnapshots(vm.getId(), null)) { - s_logger.debug("Unable to delete all snapshots for " + vm); + logger.debug("Unable to delete all snapshots for " + vm); throw new CloudRuntimeException("Unable to delete vm snapshots for " + vm); } @@ -1681,11 +1679,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac vm = _vmDao.findByUuid(vmUuid); try { if (!stateTransitTo(vm, VirtualMachine.Event.DestroyRequested, vm.getHostId())) { - s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm); + logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm); throw new 
CloudRuntimeException("Unable to destroy " + vm); } } catch (final NoTransitionException e) { - s_logger.debug(e.getMessage()); + logger.debug(e.getMessage()); throw new CloudRuntimeException("Unable to destroy " + vm, e); } } @@ -1751,7 +1749,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { stateTransitTo(vm, VirtualMachine.Event.StorageMigrationRequested, null); } catch (final NoTransitionException e) { - s_logger.debug("Unable to migrate vm: " + e.toString()); + logger.debug("Unable to migrate vm: " + e.toString()); throw new CloudRuntimeException("Unable to migrate vm: " + e.toString()); } @@ -1781,7 +1779,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId); final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId); if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) { - s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() + + logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId()); final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName()); uvc.setCleanupVmFiles(true); @@ -1796,28 +1794,28 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } else { - s_logger.debug("Storage migration failed"); + logger.debug("Storage migration failed"); } } catch (final ConcurrentOperationException e) { - s_logger.debug("Failed to migration: " + e.toString()); + logger.debug("Failed to migration: " + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } catch (final InsufficientVirtualNetworkCapacityException e) { - s_logger.debug("Failed to migration: " + e.toString()); + logger.debug("Failed to migration: 
" + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } catch (final InsufficientAddressCapacityException e) { - s_logger.debug("Failed to migration: " + e.toString()); + logger.debug("Failed to migration: " + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } catch (final InsufficientCapacityException e) { - s_logger.debug("Failed to migration: " + e.toString()); + logger.debug("Failed to migration: " + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } catch (final StorageUnavailableException e) { - s_logger.debug("Failed to migration: " + e.toString()); + logger.debug("Failed to migration: " + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } finally { try { stateTransitTo(vm, VirtualMachine.Event.AgentReportStopped, null); } catch (final NoTransitionException e) { - s_logger.debug("Failed to change vm state: " + e.toString()); + logger.debug("Failed to change vm state: " + e.toString()); throw new CloudRuntimeException("Failed to change vm state: " + e.toString()); } } @@ -1870,8 +1868,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private void orchestrateMigrate(final String vmUuid, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find the vm " + vmUuid); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find the vm " + vmUuid); } throw new CloudRuntimeException("Unable to find a virtual machine with id " + vmUuid); } @@ -1879,12 +1877,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } protected void migrate(final VMInstanceVO vm, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, 
ConcurrentOperationException { - s_logger.info("Migrating " + vm + " to " + dest); + logger.info("Migrating " + vm + " to " + dest); final long dstHostId = dest.getHost().getId(); final Host fromHost = _hostDao.findById(srcHostId); if (fromHost == null) { - s_logger.info("Unable to find the host to migrate from: " + srcHostId); + logger.info("Unable to find the host to migrate from: " + srcHostId); throw new CloudRuntimeException("Unable to find the host to migrate from: " + srcHostId); } @@ -1892,7 +1890,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final List volumes = _volsDao.findCreatedByInstance(vm.getId()); for (final VolumeVO volume : volumes) { if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) { - s_logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " + logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " + dest.getHost().getId()); throw new CloudRuntimeException( "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " @@ -1904,8 +1902,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final VirtualMachineGuru vmGuru = getVmGuru(vm); if (vm.getState() != State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not Running, unable to migrate the vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is not Running, unable to migrate the vm " + vm); } throw new CloudRuntimeException("VM is not Running, unable to migrate the vm currently " + vm + " , current state: " + vm.getState().toString()); } @@ -1959,12 +1957,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { if (vm == null || vm.getHostId() == null || vm.getHostId() 
!= srcHostId || !changeState(vm, Event.MigrationRequested, dstHostId, work, Step.Migrating)) { _networkMgr.rollbackNicForMigration(vmSrc, profile); - s_logger.info("Migration cancelled because state has changed: " + vm); + logger.info("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e1) { _networkMgr.rollbackNicForMigration(vmSrc, profile); - s_logger.info("Migration cancelled because " + e1.getMessage()); + logger.info("Migration cancelled because " + e1.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } @@ -1982,7 +1980,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } catch (final OperationTimedoutException e) { if (e.isActive()) { - s_logger.warn("Active migration command so scheduling a restart for " + vm); + logger.warn("Active migration command so scheduling a restart for " + vm); _haMgr.scheduleRestart(vm, true); } throw new AgentUnavailableException("Operation timed out on migrating " + vm, dstHostId); @@ -1998,23 +1996,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { if (!checkVmOnHost(vm, dstHostId)) { - s_logger.error("Unable to complete migration for " + vm); + logger.error("Unable to complete migration for " + vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm)), null); } catch (final AgentUnavailableException e) { - s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId); + logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - s_logger.debug("Error while checking the vm " + vm + " on 
host " + dstHostId, e); + logger.debug("Error while checking the vm " + vm + " on host " + dstHostId, e); } migrated = true; } finally { if (!migrated) { - s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); + logger.info("Migration was unsuccessful. Cleaning up: " + vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); _alertMgr.sendAlert(alertType, fromHost.getDataCenterId(), fromHost.getPodId(), @@ -2023,13 +2021,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(dstHostId, new Commands(cleanup(vm)), null); } catch (final AgentUnavailableException ae) { - s_logger.info("Looks like the destination Host is unavailable for cleanup"); + logger.info("Looks like the destination Host is unavailable for cleanup"); } try { stateTransitTo(vm, Event.OperationFailed, srcHostId); } catch (final NoTransitionException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); } } else { _networkMgr.commitNicForMigration(vmSrc, profile); @@ -2109,11 +2107,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // Put the vm in migrating state. try { if (!changeState(vm, Event.MigrationRequested, hostId, work, Step.Migrating)) { - s_logger.info("Migration cancelled because state has changed: " + vm); + logger.info("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e) { - s_logger.info("Migration cancelled because " + e.getMessage()); + logger.info("Migration cancelled because " + e.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e.getMessage()); } } @@ -2122,11 +2120,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // Put the vm in running state. 
try { if (!changeState(vm, Event.OperationSucceeded, hostId, work, Step.Started)) { - s_logger.error("Unable to change the state for " + vm); + logger.error("Unable to change the state for " + vm); throw new ConcurrentOperationException("Unable to change the state for " + vm); } } catch (final NoTransitionException e) { - s_logger.error("Unable to change state due to " + e.getMessage()); + logger.error("Unable to change state due to " + e.getMessage()); throw new ConcurrentOperationException("Unable to change state due to " + e.getMessage()); } } @@ -2258,9 +2256,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac AttachOrDettachConfigDriveCommand dettachCommand = new AttachOrDettachConfigDriveCommand(vm.getInstanceName(), vmData, VmConfigDriveLabel.value(), false); try { _agentMgr.send(srcHost.getId(), dettachCommand); - s_logger.debug("Deleted config drive ISO for vm " + vm.getInstanceName() + " In host " + srcHost); + logger.debug("Deleted config drive ISO for vm " + vm.getInstanceName() + " In host " + srcHost); } catch (OperationTimedoutException e) { - s_logger.debug("TIme out occured while exeuting command AttachOrDettachConfigDrive " + e.getMessage()); + logger.debug("TIme out occured while exeuting command AttachOrDettachConfigDrive " + e.getMessage()); } @@ -2275,23 +2273,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { if (!checkVmOnHost(vm, destHostId)) { - s_logger.error("Vm not found on destination host. Unable to complete migration for " + vm); + logger.error("Vm not found on destination host. 
Unable to complete migration for " + vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId); + logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("VM not found on desintation host. Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - s_logger.warn("Error while checking the vm " + vm + " is on host " + destHost, e); + logger.warn("Error while checking the vm " + vm + " is on host " + destHost, e); } migrated = true; } finally { if (!migrated) { - s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); + logger.info("Migration was unsuccessful. Cleaning up: " + vm); _alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + srcHost.getName() + " in zone " + dc.getName() + " and pod " + dc.getName(), "Migrate Command failed. 
Please check logs."); @@ -2299,9 +2297,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _agentMgr.send(destHostId, new Commands(cleanup(vm.getInstanceName())), null); stateTransitTo(vm, Event.OperationFailed, srcHostId); } catch (final AgentUnavailableException e) { - s_logger.warn("Looks like the destination Host is unavailable for cleanup.", e); + logger.warn("Looks like the destination Host is unavailable for cleanup.", e); } catch (final NoTransitionException e) { - s_logger.error("Error while transitioning vm from migrating to running state.", e); + logger.error("Error while transitioning vm from migrating to running state.", e); } } @@ -2325,7 +2323,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { final List works = _workDao.listWorkInProgressFor(nodeId); for (final ItWorkVO work : works) { - s_logger.info("Handling unfinished work item: " + work); + logger.info("Handling unfinished work item: " + work); try { final VMInstanceVO vm = _vmDao.findById(work.getInstanceId()); if (vm != null) { @@ -2346,7 +2344,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } catch (final Exception e) { - s_logger.error("Error while handling " + work, e); + logger.error("Error while handling " + work, e); } } } finally { @@ -2371,7 +2369,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { orchestrateMigrateAway(vmUuid, srcHostId, null); } catch (final InsufficientServerCapacityException e) { - s_logger.warn("Failed to deploy vm " + vmUuid + " with original planner, sending HAPlanner"); + logger.warn("Failed to deploy vm " + vmUuid + " with original planner, sending HAPlanner"); orchestrateMigrateAway(vmUuid, srcHostId, _haMgr.getHAPlanner()); } } finally { @@ -2406,7 +2404,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private void orchestrateMigrateAway(final String vmUuid, final 
long srcHostId, final DeploymentPlanner planner) throws InsufficientServerCapacityException { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null) { - s_logger.debug("Unable to find a VM for " + vmUuid); + logger.debug("Unable to find a VM for " + vmUuid); throw new CloudRuntimeException("Unable to find " + vmUuid); } @@ -2415,7 +2413,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final Long hostId = vm.getHostId(); if (hostId == null) { - s_logger.debug("Unable to migrate because the VM doesn't have a host id: " + vm); + logger.debug("Unable to migrate because the VM doesn't have a host id: " + vm); throw new CloudRuntimeException("Unable to migrate " + vmUuid); } @@ -2439,17 +2437,17 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { dest = _dpMgr.planDeployment(profile, plan, excludes, planner); } catch (final AffinityConflictException e2) { - s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2); + logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2); throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict"); } if (dest != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found destination " + dest + " for migrating to."); + if (logger.isDebugEnabled()) { + logger.debug("Found destination " + dest + " for migrating to."); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find destination for migrating the vm " + profile); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find destination for migrating the vm " + profile); } throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId()); } @@ -2459,22 +2457,22 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac migrate(vm, srcHostId, dest); return; } catch (final 
ResourceUnavailableException e) { - s_logger.debug("Unable to migrate to unavailable " + dest); + logger.debug("Unable to migrate to unavailable " + dest); } catch (final ConcurrentOperationException e) { - s_logger.debug("Unable to migrate VM due to: " + e.getMessage()); + logger.debug("Unable to migrate VM due to: " + e.getMessage()); } try { advanceStop(vmUuid, true); throw new CloudRuntimeException("Unable to migrate " + vm); } catch (final ResourceUnavailableException e) { - s_logger.debug("Unable to stop VM due to " + e.getMessage()); + logger.debug("Unable to stop VM due to " + e.getMessage()); throw new CloudRuntimeException("Unable to migrate " + vm); } catch (final ConcurrentOperationException e) { - s_logger.debug("Unable to stop VM due to " + e.getMessage()); + logger.debug("Unable to stop VM due to " + e.getMessage()); throw new CloudRuntimeException("Unable to migrate " + vm); } catch (final OperationTimedoutException e) { - s_logger.debug("Unable to stop VM due to " + e.getMessage()); + logger.debug("Unable to stop VM due to " + e.getMessage()); throw new CloudRuntimeException("Unable to migrate " + vm); } } @@ -2483,7 +2481,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac protected class CleanupTask extends ManagedContextRunnable { @Override protected void runInContext() { - s_logger.trace("VM Operation Thread Running"); + logger.trace("VM Operation Thread Running"); try { _workDao.cleanup(VmOpCleanupWait.value()); @@ -2491,7 +2489,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final Date cutDate = new Date(new Date().getTime() - 3600000); _workJobDao.expungeCompletedWorkJobs(cutDate); } catch (final Exception e) { - s_logger.error("VM Operations failed due to ", e); + logger.error("VM Operations failed due to ", e); } } } @@ -2589,9 +2587,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (rebootAnswer != null && 
rebootAnswer.getResult()) { return; } - s_logger.info("Unable to reboot VM " + vm + " on " + dest.getHost() + " due to " + (rebootAnswer == null ? " no reboot answer" : rebootAnswer.getDetails())); + logger.info("Unable to reboot VM " + vm + " on " + dest.getHost() + " due to " + (rebootAnswer == null ? " no reboot answer" : rebootAnswer.getDetails())); } catch (final OperationTimedoutException e) { - s_logger.warn("Unable to send the reboot command to host " + dest.getHost() + " for the vm " + vm + " due to operation timeout", e); + logger.warn("Unable to send the reboot command to host " + dest.getHost() + " for the vm " + vm + " due to operation timeout", e); throw new CloudRuntimeException("Failed to reboot the vm on host " + dest.getHost()); } } @@ -2668,24 +2666,24 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac NoTransitionException, InsufficientAddressCapacityException { final VirtualMachineGuru vmGuru = getVmGuru(vm); - s_logger.debug("VM state is starting on full sync so updating it to running"); + logger.debug("VM state is starting on full sync so updating it to running"); vm = _vmDao.findById(vm.getId()); // grab outstanding work item if any final ItWorkVO work = _workDao.findByOutstandingWork(vm.getId(), vm.getState()); if (work != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found an outstanding work item for this vm " + vm + " in state:" + vm.getState() + ", work id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found an outstanding work item for this vm " + vm + " in state:" + vm.getState() + ", work id:" + work.getId()); } } try { stateTransitTo(vm, cause, hostId); } catch (final NoTransitionException e1) { - s_logger.warn(e1.getMessage()); + logger.warn(e1.getMessage()); } - s_logger.debug("VM's " + vm + " state is starting on full sync so updating it to Running"); + logger.debug("VM's " + vm + " state is starting on full sync so updating it to Running"); vm = 
_vmDao.findById(vm.getId()); // this should ensure vm has the most // up to date info @@ -2700,7 +2698,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } final Commands cmds = new Commands(Command.OnError.Stop); - s_logger.debug("Finalizing commands that need to be send to complete Start process for the vm " + vm); + logger.debug("Finalizing commands that need to be send to complete Start process for the vm " + vm); if (vmGuru.finalizeCommandsOnStart(cmds, profile)) { if (cmds.size() != 0) { @@ -2710,15 +2708,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (vmGuru.finalizeStart(profile, vm.getHostId(), cmds, null)) { stateTransitTo(vm, cause, vm.getHostId()); } else { - s_logger.error("Unable to finish finialization for running vm: " + vm); + logger.error("Unable to finish finialization for running vm: " + vm); } } else { - s_logger.error("Unable to finalize commands on start for vm: " + vm); + logger.error("Unable to finalize commands on start for vm: " + vm); } if (work != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating outstanding work item to Done, id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Updating outstanding work item to Done, id:" + work.getId()); } work.setStep(Step.Done); _workDao.update(work.getId(), work); @@ -2789,14 +2787,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return; } - if(s_logger.isDebugEnabled()) { - s_logger.debug("Received startup command from hypervisor host. host id: " + agent.getId()); + if(logger.isDebugEnabled()) { + logger.debug("Received startup command from hypervisor host. 
host id: " + agent.getId()); } _syncMgr.resetHostSyncState(agent.getId()); if (forRebalance) { - s_logger.debug("Not processing listener " + this + " as connect happens on rebalance process"); + logger.debug("Not processing listener " + this + " as connect happens on rebalance process"); return; } final Long clusterId = agent.getClusterId(); @@ -2807,9 +2805,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final ClusterVMMetaDataSyncCommand syncVMMetaDataCmd = new ClusterVMMetaDataSyncCommand(ClusterVMMetaDataSyncInterval.value(), clusterId); try { final long seq_no = _agentMgr.send(agentId, new Commands(syncVMMetaDataCmd), this); - s_logger.debug("Cluster VM metadata sync started with jobid " + seq_no); + logger.debug("Cluster VM metadata sync started with jobid " + seq_no); } catch (final AgentUnavailableException e) { - s_logger.fatal("The Cluster VM metadata sync process failed for cluster id " + clusterId + " with ", e); + logger.fatal("The Cluster VM metadata sync process failed for cluster id " + clusterId + " with ", e); } } } @@ -2819,12 +2817,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac protected void runInContext() { final GlobalLock lock = GlobalLock.getInternLock("TransitionChecking"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } try { @@ -2840,7 +2838,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } catch (final Exception e) { - s_logger.warn("Caught the following exception on transition checking", e); + logger.warn("Caught the following exception on transition checking", e); } finally { lock.unlock(); } @@ -2860,15 +2858,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // Check that the VM is stopped 
/ running if (!(vmInstance.getState().equals(State.Stopped) || vmInstance.getState().equals(State.Running))) { - s_logger.warn("Unable to upgrade virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState()); + logger.warn("Unable to upgrade virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState()); throw new InvalidParameterValueException("Unable to upgrade virtual machine " + vmInstance.toString() + " " + " in state " + vmInstance.getState() + "; make sure the virtual machine is stopped/running"); } // Check if the service offering being upgraded to is what the VM is already running with if (!newServiceOffering.isDynamic() && vmInstance.getServiceOfferingId() == newServiceOffering.getId()) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Not upgrading vm " + vmInstance.toString() + " since it already has the requested " + "service offering (" + newServiceOffering.getName() + + if (logger.isInfoEnabled()) { + logger.info("Not upgrading vm " + vmInstance.toString() + " since it already has the requested " + "service offering (" + newServiceOffering.getName() + ")"); } @@ -2978,7 +2976,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac InsufficientCapacityException { final CallContext cctx = CallContext.current(); - s_logger.debug("Adding vm " + vm + " to network " + network + "; requested nic profile " + requested); + logger.debug("Adding vm " + vm + " to network " + network + "; requested nic profile " + requested); final VMInstanceVO vmVO = _vmDao.findById(vm.getId()); final ReservationContext context = new ReservationContextImpl(null, null, cctx.getCallingUser(), cctx.getCallingAccount()); @@ -3001,13 +2999,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final NicTO nicTO = toNicTO(nic, vmProfile.getVirtualMachine().getHypervisorType()); //4) plug the nic to the vm - s_logger.debug("Plugging nic for vm " + vm + " in network " + network); + 
logger.debug("Plugging nic for vm " + vm + " in network " + network); boolean result = false; try { result = plugNic(network, nicTO, vmTO, context, dest); if (result) { - s_logger.debug("Nic is plugged successfully for vm " + vm + " in network " + network + ". Vm is a part of network now"); + logger.debug("Nic is plugged successfully for vm " + vm + " in network " + network + ". Vm is a part of network now"); final long isDefault = nic.isDefaultNic() ? 1 : 0; // insert nic's Id into DB as resource_name if(VirtualMachine.Type.User.equals(vmVO.getType())) { @@ -3017,12 +3015,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } return nic; } else { - s_logger.warn("Failed to plug nic to the vm " + vm + " in network " + network); + logger.warn("Failed to plug nic to the vm " + vm + " in network " + network); return null; } } finally { if (!result) { - s_logger.debug("Removing nic " + nic + " from vm " + vmProfile.getVirtualMachine() + " as nic plug failed on the backend"); + logger.debug("Removing nic " + nic + " from vm " + vmProfile.getVirtualMachine() + " as nic plug failed on the backend"); _networkMgr.removeNic(vmProfile, _nicsDao.findById(nic.getId())); } } @@ -3030,7 +3028,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac //1) allocate nic return _networkMgr.createNicForVm(network, requested, context, vmProfile, false); } else { - s_logger.warn("Unable to add vm " + vm + " to network " + network); + logger.warn("Unable to add vm " + vm + " to network " + network); throw new ResourceUnavailableException("Unable to add vm " + vm + " to network, is not in the right state", DataCenter.class, vm.getDataCenterId()); } } @@ -3111,25 +3109,25 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac //1) Unplug the nic if (vm.getState() == State.Running) { final NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); - 
s_logger.debug("Un-plugging nic " + nic + " for vm " + vm + " from network " + network); + logger.debug("Un-plugging nic " + nic + " for vm " + vm + " from network " + network); final boolean result = unplugNic(network, nicTO, vmTO, context, dest); if (result) { - s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network); + logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network); final long isDefault = nic.isDefaultNic() ? 1 : 0; UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay()); } else { - s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); + logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); return false; } } else if (vm.getState() != State.Stopped) { - s_logger.warn("Unable to remove vm " + vm + " from network " + network); + logger.warn("Unable to remove vm " + vm + " from network " + network); throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId()); } //2) Release the nic _networkMgr.releaseNic(vmProfile, nic); - s_logger.debug("Successfully released nic " + nic + "for vm " + vm); + logger.debug("Successfully released nic " + nic + "for vm " + vm); //3) Remove the nic _networkMgr.removeNic(vmProfile, nic); @@ -3166,13 +3164,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if (nic == null) { - s_logger.warn("Could not get a nic with " + network); + logger.warn("Could not get a nic with " + network); return false; } // don't delete default NIC on a user VM if (nic.isDefaultNic() && vm.getType() == VirtualMachine.Type.User) { - s_logger.warn("Failed to remove nic from " + vm + " in " + 
network + ", nic is default."); + logger.warn("Failed to remove nic from " + vm + " in " + network + ", nic is default."); throw new CloudRuntimeException("Failed to remove nic from " + vm + " in " + network + ", nic is default."); } @@ -3181,16 +3179,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (lock == null) { //check if nic is still there. Return if it was released already if (_nicsDao.findById(nic.getId()) == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not need to remove the vm " + vm + " from network " + network + " as the vm doesn't have nic in this network"); + if (logger.isDebugEnabled()) { + logger.debug("Not need to remove the vm " + vm + " from network " + network + " as the vm doesn't have nic in this network"); } return true; } throw new ConcurrentOperationException("Unable to lock nic " + nic.getId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is acquired for nic id " + lock.getId() + " as a part of remove vm " + vm + " from network " + network); + if (logger.isDebugEnabled()) { + logger.debug("Lock is acquired for nic id " + lock.getId() + " as a part of remove vm " + vm + " from network " + network); } try { @@ -3201,22 +3199,22 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac //1) Unplug the nic if (vm.getState() == State.Running) { final NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); - s_logger.debug("Un-plugging nic for vm " + vm + " from network " + network); + logger.debug("Un-plugging nic for vm " + vm + " from network " + network); final boolean result = unplugNic(network, nicTO, vmTO, context, dest); if (result) { - s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network); + logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network); } else { - s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); + 
logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); return false; } } else if (vm.getState() != State.Stopped) { - s_logger.warn("Unable to remove vm " + vm + " from network " + network); + logger.warn("Unable to remove vm " + vm + " from network " + network); throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId()); } //2) Release the nic _networkMgr.releaseNic(vmProfile, nic); - s_logger.debug("Successfully released nic " + nic + "for vm " + vm); + logger.debug("Successfully released nic " + nic + "for vm " + vm); //3) Remove the nic _networkMgr.removeNic(vmProfile, nic); @@ -3224,8 +3222,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } finally { if (lock != null) { _nicsDao.releaseFromLockTable(lock.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is released for nic id " + lock.getId() + " as a part of remove vm " + vm + " from network " + network); + if (logger.isDebugEnabled()) { + logger.debug("Lock is released for nic id " + lock.getId() + " as a part of remove vm " + vm + " from network " + network); } } } @@ -3257,13 +3255,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { dest = _dpMgr.planDeployment(profile, plan, excludes, null); } catch (final AffinityConflictException e2) { - s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2); + logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2); throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict"); } if (dest != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(" Found " + dest + " for scaling the vm to."); + if (logger.isDebugEnabled()) { + logger.debug(" Found " + dest + " for scaling the vm to."); } } @@ -3275,10 +3273,10 @@ public class 
VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { migrateForScale(vm.getUuid(), srcHostId, dest, oldSvcOfferingId); } catch (final ResourceUnavailableException e) { - s_logger.debug("Unable to migrate to unavailable " + dest); + logger.debug("Unable to migrate to unavailable " + dest); throw e; } catch (final ConcurrentOperationException e) { - s_logger.debug("Unable to migrate VM due to: " + e.getMessage()); + logger.debug("Unable to migrate VM due to: " + e.getMessage()); throw e; } } @@ -3329,18 +3327,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throws ResourceUnavailableException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); - s_logger.info("Migrating " + vm + " to " + dest); + logger.info("Migrating " + vm + " to " + dest); vm.getServiceOfferingId(); final long dstHostId = dest.getHost().getId(); final Host fromHost = _hostDao.findById(srcHostId); if (fromHost == null) { - s_logger.info("Unable to find the host to migrate from: " + srcHostId); + logger.info("Unable to find the host to migrate from: " + srcHostId); throw new CloudRuntimeException("Unable to find the host to migrate from: " + srcHostId); } if (fromHost.getClusterId().longValue() != dest.getCluster().getId()) { - s_logger.info("Source and destination host are not in same cluster, unable to migrate to host: " + dest.getHost().getId()); + logger.info("Source and destination host are not in same cluster, unable to migrate to host: " + dest.getHost().getId()); throw new CloudRuntimeException("Source and destination host are not in same cluster, unable to migrate to host: " + dest.getHost().getId()); } @@ -3349,15 +3347,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final long vmId = vm.getId(); vm = _vmDao.findByUuid(vmUuid); if (vm == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find the vm " + vm); + if (logger.isDebugEnabled()) { + 
logger.debug("Unable to find the vm " + vm); } throw new CloudRuntimeException("Unable to find a virtual machine with id " + vmId); } if (vm.getState() != State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not Running, unable to migrate the vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is not Running, unable to migrate the vm " + vm); } throw new CloudRuntimeException("VM is not Running, unable to migrate the vm currently " + vm + " , current state: " + vm.getState().toString()); } @@ -3404,11 +3402,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac vm.setLastHostId(srcHostId); try { if (vm == null || vm.getHostId() == null || vm.getHostId() != srcHostId || !changeState(vm, Event.MigrationRequested, dstHostId, work, Step.Migrating)) { - s_logger.info("Migration cancelled because state has changed: " + vm); + logger.info("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e1) { - s_logger.info("Migration cancelled because " + e1.getMessage()); + logger.info("Migration cancelled because " + e1.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } @@ -3423,12 +3421,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (ma == null || !ma.getResult()) { final String details = ma != null ? 
ma.getDetails() : "null answer returned"; final String msg = "Unable to migrate due to " + details; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } catch (final OperationTimedoutException e) { if (e.isActive()) { - s_logger.warn("Active migration command so scheduling a restart for " + vm); + logger.warn("Active migration command so scheduling a restart for " + vm); _haMgr.scheduleRestart(vm, true); } throw new AgentUnavailableException("Operation timed out on migrating " + vm, dstHostId); @@ -3447,23 +3445,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { if (!checkVmOnHost(vm, dstHostId)) { - s_logger.error("Unable to complete migration for " + vm); + logger.error("Unable to complete migration for " + vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId); + logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - s_logger.debug("Error while checking the vm " + vm + " on host " + dstHostId, e); + logger.debug("Error while checking the vm " + vm + " on host " + dstHostId, e); } migrated = true; } finally { if (!migrated) { - s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); + logger.info("Migration was unsuccessful. 
Cleaning up: " + vm); _alertMgr.sendAlert(alertType, fromHost.getDataCenterId(), fromHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + @@ -3471,13 +3469,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(dstHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException ae) { - s_logger.info("Looks like the destination Host is unavailable for cleanup"); + logger.info("Looks like the destination Host is unavailable for cleanup"); } try { stateTransitTo(vm, Event.OperationFailed, srcHostId); } catch (final NoTransitionException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); } } @@ -3499,14 +3497,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _agentMgr.send(dest.getHost().getId(), cmds); final PlugNicAnswer plugNicAnswer = cmds.getAnswer(PlugNicAnswer.class); if (!(plugNicAnswer != null && plugNicAnswer.getResult())) { - s_logger.warn("Unable to plug nic for vm " + vm.getName()); + logger.warn("Unable to plug nic for vm " + vm.getName()); result = false; } } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Unable to plug nic for router " + vm.getName() + " in network " + network, dest.getHost().getId(), e); } } else { - s_logger.warn("Unable to apply PlugNic, vm " + router + " is not in the right state " + router.getState()); + logger.warn("Unable to apply PlugNic, vm " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply PlugNic on the backend," + " vm " + vm + " is not in the right state", DataCenter.class, router.getDataCenterId()); @@ -3530,16 +3528,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final UnPlugNicAnswer unplugNicAnswer = 
cmds.getAnswer(UnPlugNicAnswer.class); if (!(unplugNicAnswer != null && unplugNicAnswer.getResult())) { - s_logger.warn("Unable to unplug nic from router " + router); + logger.warn("Unable to unplug nic from router " + router); result = false; } } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Unable to unplug nic from rotuer " + router + " from network " + network, dest.getHost().getId(), e); } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Vm " + router.getInstanceName() + " is in " + router.getState() + ", so not sending unplug nic command to the backend"); + logger.debug("Vm " + router.getInstanceName() + " is in " + router.getState() + ", so not sending unplug nic command to the backend"); } else { - s_logger.warn("Unable to apply unplug nic, Vm " + router + " is not in the right state " + router.getState()); + logger.warn("Unable to apply unplug nic, Vm " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply unplug nic on the backend," + " vm " + router + " is not in the right state", DataCenter.class, router.getDataCenterId()); @@ -3587,7 +3585,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } else if (jobResult instanceof InsufficientServerCapacityException) { throw (InsufficientServerCapacityException)jobResult; } else if (jobResult instanceof Throwable) { - s_logger.error("Unhandled exception", (Throwable)jobResult); + logger.error("Unhandled exception", (Throwable)jobResult); throw new RuntimeException("Unhandled exception", (Throwable)jobResult); } } @@ -3636,7 +3634,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final Answer reconfigureAnswer = _agentMgr.send(vm.getHostId(), reconfigureCmd); if (reconfigureAnswer == null || !reconfigureAnswer.getResult()) { - s_logger.error("Unable to scale vm due to " + 
(reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); + logger.error("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); throw new CloudRuntimeException("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); } @@ -3711,10 +3709,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac break; } } else { - s_logger.warn("VM " + vmId + " no longer exists when processing VM state report"); + logger.warn("VM " + vmId + " no longer exists when processing VM state report"); } } else { - s_logger.info("There is pending job or HA tasks working on the VM. vm id: " + vmId + ", postpone power-change report by resetting power-change counters"); + logger.info("There is pending job or HA tasks working on the VM. vm id: " + vmId + ", postpone power-change report by resetting power-change counters"); // reset VM power state tracking so that we won't lost signal when VM has // been translated to @@ -3731,15 +3729,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // switch (vm.getState()) { case Starting: - s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } - s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); + logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on 
report from hypervisor"); // we need to alert admin or user about this risky state transition _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), @@ -3750,50 +3748,50 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac case Running: try { if (vm.getHostId() != null && vm.getHostId().longValue() != vm.getPowerHostId().longValue()) { - s_logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId()); + logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId()); } stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } break; case Stopping: case Stopped: - s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() + " -> Running) from out-of-context transition. 
VM network environment may need to be reset"); - s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); + logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); break; case Destroyed: case Expunging: - s_logger.info("Receive power on report when VM is in destroyed or expunging state. vm: " + logger.info("Receive power on report when VM is in destroyed or expunging state. vm: " + vm.getId() + ", state: " + vm.getState()); break; case Migrating: - s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } - s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); + logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); break; case Error: default: - s_logger.info("Receive power on report when VM is in error or unexpected state. vm: " + logger.info("Receive power on report when VM is in error or unexpected state. 
vm: " + vm.getId() + ", state: " + vm.getState()); break; } @@ -3810,13 +3808,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac case Running: case Stopped: case Migrating: - s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-off report while there is no pending jobs on it"); + logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-off report while there is no pending jobs on it"); if(vm.isHaEnabled() && vm.getState() == State.Running && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { - s_logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart"); + logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart"); if(!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { - s_logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it"); + logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it"); } return; } @@ -3831,14 +3829,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOffReport, null); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() + " -> Stopped) from out-of-context transition."); - s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Stopped state according to power-off report from hypervisor"); + 
logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Stopped state according to power-off report from hypervisor"); break; @@ -4569,7 +4567,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateStart(final VmWorkStart work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; @@ -4581,7 +4579,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateStop(final VmWorkStop work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; @@ -4593,7 +4591,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateMigrate(final VmWorkMigrate work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; @@ -4605,14 +4603,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateMigrateAway(final VmWorkMigrateAway work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; try { orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null); } catch (final InsufficientServerCapacityException e) { - s_logger.warn("Failed to deploy vm " + vm.getId() + " with original planner, sending HAPlanner"); 
+ logger.warn("Failed to deploy vm " + vm.getId() + " with original planner, sending HAPlanner"); orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner()); } @@ -4623,7 +4621,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateMigrateWithStorage(final VmWorkMigrateWithStorage work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; orchestrateMigrateWithStorage(vm.getUuid(), @@ -4637,7 +4635,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateMigrateForScale(final VmWorkMigrateForScale work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; orchestrateMigrateForScale(vm.getUuid(), @@ -4651,7 +4649,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateReboot(final VmWorkReboot work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; orchestrateReboot(vm.getUuid(), work.getParams()); @@ -4662,7 +4660,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateAddVmToNetwork(final VmWorkAddVmToNetwork work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm 
!= null; @@ -4677,7 +4675,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateRemoveNicFromVm(final VmWorkRemoveNicFromVm work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; final NicVO nic = _entityMgr.findById(NicVO.class, work.getNicId()); @@ -4690,7 +4688,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateRemoveVmFromNetwork(final VmWorkRemoveVmFromNetwork work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; final boolean result = orchestrateRemoveVmFromNetwork(vm, @@ -4703,7 +4701,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateReconfigure(final VmWorkReconfigure work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; @@ -4718,7 +4716,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private Pair orchestrateStorageMigration(final VmWorkStorageMigration work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); + logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; final StoragePool pool = (PrimaryDataStoreInfo)dataStoreMgr.getPrimaryDataStore(work.getDestStoragePoolId()); diff --git 
a/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java b/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java index 9f1eca67022..c7e30e366a9 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java @@ -20,7 +20,6 @@ import java.util.Map; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.jobs.AsyncJob; @@ -34,7 +33,6 @@ import com.cloud.utils.component.AdapterBase; import com.cloud.vm.dao.VMInstanceDao; public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatcher { - private static final Logger s_logger = Logger.getLogger(VmWorkJobDispatcher.class); @Inject private VirtualMachineManagerImpl _vmMgr; @Inject @@ -65,23 +63,23 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch try { workClz = Class.forName(job.getCmd()); } catch (ClassNotFoundException e) { - s_logger.error("VM work class " + cmd + " is not found" + ", job origin: " + job.getRelated(), e); + logger.error("VM work class " + cmd + " is not found" + ", job origin: " + job.getRelated(), e); _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, e.getMessage()); return; } work = VmWorkSerializer.deserialize(workClz, job.getCmdInfo()); if(work == null) { - s_logger.error("Unable to deserialize VM work " + job.getCmd() + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated()); + logger.error("Unable to deserialize VM work " + job.getCmd() + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated()); _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to deserialize VM work"); return; } - if (s_logger.isDebugEnabled()) - s_logger.debug("Run VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated()); + if (logger.isDebugEnabled()) + logger.debug("Run VM 
work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated()); try { if (_handlers == null || _handlers.isEmpty()) { - s_logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo() + logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated()); _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Invalid startup configuration. no job handler is found"); return; @@ -90,7 +88,7 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch VmWorkJobHandler handler = _handlers.get(work.getHandlerName()); if (handler == null) { - s_logger.error("Unable to find work job handler. handler name: " + work.getHandlerName() + ", job cmd: " + job.getCmd() + logger.error("Unable to find work job handler. handler name: " + work.getHandlerName() + ", job cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated()); _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to find work job handler"); return; @@ -105,14 +103,14 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch CallContext.unregister(); } } finally { - if (s_logger.isDebugEnabled()) - s_logger.debug("Done with run of VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated()); + if (logger.isDebugEnabled()) + logger.debug("Done with run of VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated()); } } catch(InvalidParameterValueException e) { - s_logger.error("Unable to complete " + job + ", job origin:" + job.getRelated()); + logger.error("Unable to complete " + job + ", job origin:" + job.getRelated()); _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, _asyncJobMgr.marshallResultObject(e)); } 
catch(Throwable e) { - s_logger.error("Unable to complete " + job + ", job origin:" + job.getRelated(), e); + logger.error("Unable to complete " + job + ", job origin:" + job.getRelated(), e); //RuntimeException ex = new RuntimeException("Job failed due to exception " + e.getMessage()); _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, _asyncJobMgr.marshallResultObject(e)); diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkJobWakeupDispatcher.java b/engine/orchestration/src/com/cloud/vm/VmWorkJobWakeupDispatcher.java index 520a55042e3..d4da63b5bb6 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkJobWakeupDispatcher.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkJobWakeupDispatcher.java @@ -24,7 +24,6 @@ import java.util.Map; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.jobs.AsyncJob; @@ -47,7 +46,6 @@ import com.cloud.vm.dao.VMInstanceDao; * Current code base uses blocking calls to wait for job completion */ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDispatcher { - private static final Logger s_logger = Logger.getLogger(VmWorkJobWakeupDispatcher.class); @Inject private VmWorkJobDao _workjobDao; @@ -69,7 +67,7 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi try { List joinRecords = _joinMapDao.listJoinRecords(job.getId()); if (joinRecords.size() != 1) { - s_logger.warn("AsyncJob-" + job.getId() + logger.warn("AsyncJob-" + job.getId() + " received wakeup call with un-supported joining job number: " + joinRecords.size()); // if we fail wakeup-execution for any reason, avoid release sync-source if there is any @@ -84,7 +82,7 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi try { workClz = Class.forName(job.getCmd()); } catch (ClassNotFoundException e) { - s_logger.error("VM work class " + job.getCmd() + " is not found", 
e); + logger.error("VM work class " + job.getCmd() + " is not found", e); return; } @@ -105,14 +103,14 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi handler.invoke(_vmMgr); } else { assert (false); - s_logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() + + logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() + " when waking up job-" + job.getId()); } } finally { CallContext.unregister(); } } catch (Throwable e) { - s_logger.warn("Unexpected exception in waking up job-" + job.getId()); + logger.warn("Unexpected exception in waking up job-" + job.getId()); // if we fail wakeup-execution for any reason, avoid release sync-source if there is any job.setSyncSource(null); @@ -132,11 +130,11 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi method.setAccessible(true); } catch (SecurityException e) { assert (false); - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); return null; } catch (NoSuchMethodException e) { assert (false); - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); return null; } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java index b21b230d36f..50681d435ed 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java @@ -28,7 +28,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity; @@ -53,7 +52,6 @@ import 
com.cloud.utils.exception.CloudRuntimeException; @Component(value = "EngineClusterDao") @Local(value = EngineClusterDao.class) public class EngineClusterDaoImpl extends GenericDaoBase implements EngineClusterDao { - private static final Logger s_logger = Logger.getLogger(EngineClusterDaoImpl.class); protected final SearchBuilder PodSearch; protected final SearchBuilder HyTypeWithoutGuidSearch; @@ -274,7 +272,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase int rows = update(vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { EngineClusterVO dbCluster = findByIdIncludingRemoved(vo.getId()); if (dbCluster != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -301,7 +299,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); } } return rows > 0; diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index 50ddc5915ca..62081931f0d 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -51,7 +51,6 @@ import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.region.PortableIpDao; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; 
@@ -213,7 +212,6 @@ import com.cloud.vm.dao.VMInstanceDao; */ @Local(value = {NetworkOrchestrationService.class}) public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestrationService, Listener, Configurable { - static final Logger s_logger = Logger.getLogger(NetworkOrchestrator.class); @Inject EntityManager _entityMgr; @@ -580,7 +578,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra Network.State.getStateMachine().registerListener(new NetworkStateListener(_usageEventDao, _networksDao, _configDao)); - s_logger.info("Network Manager is configured."); + logger.info("Network Manager is configured."); return true; } @@ -624,8 +622,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra .getBroadcastDomainType() == BroadcastDomainType.Vxlan))) { List configs = _networksDao.listBy(owner.getId(), offering.getId(), plan.getDataCenterId()); if (configs.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0)); + if (logger.isDebugEnabled()) { + logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0)); } if (errorIfAlreadySetup) { @@ -692,7 +690,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return networks; } finally { - s_logger.debug("Releasing lock for " + locked); + logger.debug("Releasing lock for " + locked); _accountDao.releaseFromLockTable(locked.getId()); } } @@ -774,7 +772,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } if (nics.size() != size) { - s_logger.warn("Number of nics " + nics.size() + " doesn't match number of requested nics " + size); + logger.warn("Number of nics " + nics.size() + " doesn't match number of requested nics " + size); throw new CloudRuntimeException("Number of nics " + nics.size() + " doesn't match number of requested networks " + 
size); } @@ -791,7 +789,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { NetworkVO ntwkVO = _networksDao.findById(network.getId()); - s_logger.debug("Allocating nic for vm " + vm.getVirtualMachine() + " in network " + network + " with requested profile " + requested); + logger.debug("Allocating nic for vm " + vm.getVirtualMachine() + " in network " + network + " with requested profile " + requested); NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, ntwkVO.getGuruName()); if (requested != null && requested.getMode() == null) { @@ -961,7 +959,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra NetworkVO network = _networksDao.findById(networkId); NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); if (isNetworkImplemented(network)) { - s_logger.debug("Network id=" + networkId + " is already implemented"); + logger.debug("Network id=" + networkId + " is already implemented"); implemented.set(guru, network); return implemented; } @@ -975,19 +973,19 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra throw ex; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is acquired for network id " + networkId + " as a part of network implement"); + if (logger.isDebugEnabled()) { + logger.debug("Lock is acquired for network id " + networkId + " as a part of network implement"); } try { if (isNetworkImplemented(network)) { - s_logger.debug("Network id=" + networkId + " is already implemented"); + logger.debug("Network id=" + networkId + " is already implemented"); implemented.set(guru, network); return implemented; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + guru.getName() + " to implement " + network); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + guru.getName() + " to 
implement " + network); } NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -1021,11 +1019,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra implemented.set(guru, network); return implemented; } catch (NoTransitionException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); return null; } finally { if (implemented.first() == null) { - s_logger.debug("Cleaning up because we're unable to implement the network " + network); + logger.debug("Cleaning up because we're unable to implement the network " + network); try { if (isSharedNetworkWithServices(network)) { network.setState(Network.State.Shutdown); @@ -1034,20 +1032,20 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra stateTransitTo(network, Event.OperationFailed); } } catch (NoTransitionException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } try { shutdownNetwork(networkId, context, false); } catch (Exception e) { // Don't throw this exception as it would hide the original thrown exception, just log - s_logger.error("Exception caught while shutting down a network as part of a failed implementation", e); + logger.error("Exception caught while shutting down a network as part of a failed implementation", e); } } _networksDao.releaseFromLockTable(networkId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is released for network id " + networkId + " as a part of network implement"); + if (logger.isDebugEnabled()) { + logger.debug("Lock is released for network id " + networkId + " as a part of network implement"); } } } @@ -1073,13 +1071,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra ips = _ipAddressDao.listByAssociatedVpc(network.getVpcId(), true); if (ips.isEmpty()) { Vpc vpc = _vpcMgr.getActiveVpc(network.getVpcId()); - s_logger.debug("Creating a source nat ip for vpc " + vpc); + logger.debug("Creating a 
source nat ip for vpc " + vpc); _vpcMgr.assignSourceNatIpAddressToVpc(owner, vpc); } } else { ips = _ipAddressDao.listByAssociatedNetwork(network.getId(), true); if (ips.isEmpty()) { - s_logger.debug("Creating a source nat ip for network " + network); + logger.debug("Creating a source nat ip for network " + network); _ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network); } } @@ -1097,8 +1095,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra + network.getPhysicalNetworkId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + element.getName() + " to implemenet " + network); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + element.getName() + " to implemenet " + network); } if (!element.implement(network, offering, dest, context)) { @@ -1117,9 +1115,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra try { // reapply all the firewall/staticNat/lb rules - s_logger.debug("Reprogramming network " + network + " as a part of network implement"); + logger.debug("Reprogramming network " + network + " as a part of network implement"); if (!reprogramNetworkRules(network.getId(), CallContext.current().getCallingAccount(), network)) { - s_logger.warn("Failed to re-program the network as a part of network " + network + " implement"); + logger.warn("Failed to re-program the network as a part of network " + network + " implement"); // see DataCenterVO.java ResourceUnavailableException ex = new ResourceUnavailableException("Unable to apply network rules as a part of network " + network + " implement", DataCenter.class, network.getDataCenterId()); @@ -1129,7 +1127,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (NetworkElement element : networkElements) { if ((element instanceof AggregatedCommandExecutor) && (providersToImplement.contains(element.getProvider()))) { if 
(!((AggregatedCommandExecutor)element).completeAggregatedExecution(network, dest)) { - s_logger.warn("Failed to re-program the network as a part of network " + network + " implement due to aggregated commands execution failure!"); + logger.warn("Failed to re-program the network as a part of network " + network + " implement due to aggregated commands execution failure!"); // see DataCenterVO.java ResourceUnavailableException ex = new ResourceUnavailableException("Unable to apply network rules as a part of network " + network + " implement", DataCenter.class, network.getDataCenterId()); @@ -1161,51 +1159,51 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.getEgressDefaultPolicy(), true); } if (!_firewallMgr.applyFirewallRules(firewallEgressRulesToApply, false, caller)) { - s_logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id=" + networkId + " restart"); success = false; } // associate all ip addresses if (!_ipAddrMgr.applyIpAssociations(network, false)) { - s_logger.warn("Failed to apply ip addresses as a part of network id" + networkId + " restart"); + logger.warn("Failed to apply ip addresses as a part of network id" + networkId + " restart"); success = false; } // apply static nat if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) { - s_logger.warn("Failed to apply static nats a part of network id" + networkId + " restart"); + logger.warn("Failed to apply static nats a part of network id" + networkId + " restart"); success = false; } // apply firewall rules List firewallIngressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); if (!_firewallMgr.applyFirewallRules(firewallIngressRulesToApply, false, caller)) { - s_logger.warn("Failed to 
reapply Ingress firewall rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id=" + networkId + " restart"); success = false; } // apply port forwarding rules if (!_rulesMgr.applyPortForwardingRulesForNetwork(networkId, false, caller)) { - s_logger.warn("Failed to reapply port forwarding rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply port forwarding rule(s) as a part of network id=" + networkId + " restart"); success = false; } // apply static nat rules if (!_rulesMgr.applyStaticNatRulesForNetwork(networkId, false, caller)) { - s_logger.warn("Failed to reapply static nat rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply static nat rule(s) as a part of network id=" + networkId + " restart"); success = false; } // apply public load balancer rules if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) { - s_logger.warn("Failed to reapply Public load balancer rules as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply Public load balancer rules as a part of network id=" + networkId + " restart"); success = false; } // apply internal load balancer rules if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) { - s_logger.warn("Failed to reapply internal load balancer rules as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply internal load balancer rules as a part of network id=" + networkId + " restart"); success = false; } @@ -1215,7 +1213,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (RemoteAccessVpn vpn : vpnsToReapply) { // Start remote access vpn per ip if (_vpnMgr.startRemoteAccessVpn(vpn.getServerAddressId(), false) == null) { - s_logger.warn("Failed to reapply vpn rules as a part of network id=" + networkId + " restart"); + logger.warn("Failed to 
reapply vpn rules as a part of network id=" + networkId + " restart"); success = false; } } @@ -1223,7 +1221,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //apply network ACLs if (!_networkACLMgr.applyACLToNetwork(networkId)) { - s_logger.warn("Failed to reapply network ACLs as a part of of network id=" + networkId + " restart"); + logger.warn("Failed to reapply network ACLs as a part of of network id=" + networkId + " restart"); success = false; } @@ -1263,7 +1261,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra _nicDao.update(nic.getId(), nic); if (nic.getVmType() == VirtualMachine.Type.User) { - s_logger.debug("Changing active number of nics for network id=" + networkId + " on " + count); + logger.debug("Changing active number of nics for network id=" + networkId + " on " + count); _networksDao.changeActiveNicsBy(networkId, count); } @@ -1297,7 +1295,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (NicVO nic : nics) { Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - s_logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); } @@ -1363,8 +1361,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + 
element.getName() + " to prepare for " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + element.getName() + " to prepare for " + nic); } if (!prepareElement(element, network, profile, vmProfile, dest, context)) { throw new InsufficientAddressCapacityException("unable to configure the dhcp service, due to insufficiant address capacity", Network.class, network.getId()); @@ -1395,7 +1393,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra _networkModel.getNetworkTag(vm.getHypervisorType(), network)); if (guru instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder)guru).prepareMigration(profile, network, vm, dest, context)) { - s_logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error } } List providersToImplement = getNetworkProviders(network.getId()); @@ -1407,7 +1405,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } if (element instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder)element).prepareMigration(profile, network, vm, dest, context)) { - s_logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error } } } @@ -1439,7 +1437,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vm.getHypervisorType(), network)); if(guru instanceof NetworkMigrationResponder){ if(!((NetworkMigrationResponder) guru).prepareMigration(profile, network, vm, dest, context)){ - s_logger.error("NetworkGuru "+guru+" prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkGuru "+guru+" prepareForMigration failed."); // XXX: Transaction error } 
} List providersToImplement = getNetworkProviders(network.getId()); @@ -1450,7 +1448,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } if(element instanceof NetworkMigrationResponder){ if(!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)){ - s_logger.error("NetworkElement "+element+" prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkElement "+element+" prepareForMigration failed."); // XXX: Transaction error } } } @@ -1471,7 +1469,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if(nic == null && !addedURIs.contains(broadcastUri.toString())){ //Nic details are not available in DB //Create nic profile for migration - s_logger.debug("Creating nic profile for migration. BroadcastUri: "+broadcastUri.toString()+" NetworkId: "+ntwkId+" Vm: "+vm.getId()); + logger.debug("Creating nic profile for migration. BroadcastUri: "+broadcastUri.toString()+" NetworkId: "+ntwkId+" Vm: "+vm.getId()); NetworkVO network = _networksDao.findById(ntwkId); Integer networkRate = _networkModel.getNetworkRate(network.getId(), vm.getId()); NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); @@ -1638,8 +1636,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra List providersToImplement = getNetworkProviders(network.getId()); for (NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + element.getName() + " to release " + profile); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + element.getName() + " to release " + profile); } //NOTE: Context appear to never be used in release method //implementations. 
Consider removing it from interface Element @@ -1651,8 +1649,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public void cleanupNics(VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cleaning network for vm: " + vm.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Cleaning network for vm: " + vm.getId()); } List nics = _nicDao.listByVmId(vm.getId()); @@ -1674,7 +1672,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra try { releaseNic(vm, nic.getId()); } catch (Exception ex) { - s_logger.warn("Failed to release nic: " + nic.toString() + " as part of remove operation due to", ex); + logger.warn("Failed to release nic: " + nic.toString() + " as part of remove operation due to", ex); } } @@ -1692,15 +1690,15 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra List providersToImplement = getNetworkProviders(network.getId()); for (NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + element.getName() + " to release " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + element.getName() + " to release " + nic); } try { element.release(network, profile, vm, null); } catch (ConcurrentOperationException ex) { - s_logger.warn("release failed during the nic " + nic.toString() + " removeNic due to ", ex); + logger.warn("release failed during the nic " + nic.toString() + " removeNic due to ", ex); } catch (ResourceUnavailableException ex) { - s_logger.warn("release failed during the nic " + nic.toString() + " removeNic due to ", ex); + logger.warn("release failed during the nic " + nic.toString() + " removeNic due to ", ex); } } } @@ -1723,10 +1721,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra guru.deallocate(network, profile, vm); 
_nicDao.remove(nic.getId()); - s_logger.debug("Removed nic id=" + nic.getId()); + logger.debug("Removed nic id=" + nic.getId()); //remove the secondary ip addresses corresponding to to this nic if (!removeVmSecondaryIpsOfNic(nic.getId())) { - s_logger.debug("Removing nic " + nic.getId() + " secondary ip addreses failed"); + logger.debug("Removing nic " + nic.getId() + " secondary ip addreses failed"); } } @@ -1765,12 +1763,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } }); if (!dhcpServiceProvider.removeDhcpSupportForSubnet(network)) { - s_logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip " + ipAlias.getIp4Address()); + logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip " + ipAlias.getIp4Address()); } } } catch (ResourceUnavailableException e) { //failed to remove the dhcpconfig on the router. - s_logger.info("Unable to delete the ip alias due to unable to contact the virtualrouter."); + logger.info("Unable to delete the ip alias due to unable to contact the virtualrouter."); } } @@ -1793,7 +1791,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(networkOfferingId); // this method supports only guest network creation if (ntwkOff.getTrafficType() != TrafficType.Guest) { - s_logger.warn("Only guest networks can be created using this method"); + logger.warn("Only guest networks can be created using this method"); return null; } @@ -2091,12 +2089,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra public boolean shutdownNetwork(final long networkId, ReservationContext context, boolean cleanupElements) { NetworkVO network = _networksDao.findById(networkId); if (network.getState() == Network.State.Allocated) { - s_logger.debug("Network is already shutdown: " + network); + 
logger.debug("Network is already shutdown: " + network); return true; } if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) { - s_logger.debug("Network is not implemented: " + network); + logger.debug("Network is not implemented: " + network); return false; } @@ -2104,20 +2102,20 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //do global lock for the network network = _networksDao.acquireInLockTable(networkId, NetworkLockTimeout.value()); if (network == null) { - s_logger.warn("Unable to acquire lock for the network " + network + " as a part of network shutdown"); + logger.warn("Unable to acquire lock for the network " + network + " as a part of network shutdown"); return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is acquired for network " + network + " as a part of network shutdown"); + if (logger.isDebugEnabled()) { + logger.debug("Lock is acquired for network " + network + " as a part of network shutdown"); } if (network.getState() == Network.State.Allocated) { - s_logger.debug("Network is already shutdown: " + network); + logger.debug("Network is already shutdown: " + network); return true; } if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) { - s_logger.debug("Network is not implemented: " + network); + logger.debug("Network is not implemented: " + network); return false; } @@ -2142,8 +2140,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra boolean result = false; if (success) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); + if (logger.isDebugEnabled()) { + logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); } NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName()); 
NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId()); @@ -2182,8 +2180,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } finally { if (network != null) { _networksDao.releaseFromLockTable(network.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is released for network " + network + " as a part of network shutdown"); + if (logger.isDebugEnabled()) { + logger.debug("Lock is released for network " + network + " as a part of network shutdown"); } } } @@ -2210,11 +2208,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra cleanupResult = shutdownNetworkResources(network.getId(), context.getAccount(), context.getCaller().getId()); } } catch (Exception ex) { - s_logger.warn("shutdownNetworkRules failed during the network " + network + " shutdown due to ", ex); + logger.warn("shutdownNetworkRules failed during the network " + network + " shutdown due to ", ex); } finally { // just warn the administrator that the network elements failed to shutdown if (!cleanupResult) { - s_logger.warn("Failed to cleanup network id=" + network.getId() + " resources as a part of shutdownNetwork"); + logger.warn("Failed to cleanup network id=" + network.getId() + " resources as a part of shutdownNetwork"); } } @@ -2223,21 +2221,21 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (NetworkElement element : networkElements) { if (providersToShutdown.contains(element.getProvider())) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending network shutdown to " + element.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending network shutdown to " + element.getName()); } if (!element.shutdown(network, context, cleanupElements)) { - s_logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName()); + logger.warn("Unable to complete shutdown of the network elements due to element: " + 
element.getName()); success = false; } } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); success = false; } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); success = false; } catch (Exception e) { - s_logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); success = false; } } @@ -2252,7 +2250,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra NetworkVO network = _networksDao.findById(networkId); if (network == null) { - s_logger.debug("Unable to find network with id: " + networkId); + logger.debug("Unable to find network with id: " + networkId); return false; } @@ -2261,7 +2259,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (UserVmVO vm : userVms) { if (!(vm.getState() == VirtualMachine.State.Expunging && vm.getRemoved() != null)) { - s_logger.warn("Can't delete the network, not all user vms are expunged. Vm " + vm + " is in " + vm.getState() + " state"); + logger.warn("Can't delete the network, not all user vms are expunged. 
Vm " + vm + " is in " + vm.getState() + " state"); return false; } } @@ -2269,7 +2267,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // Don't allow to delete network via api call when it has vms assigned to it int nicCount = getActiveNicsInNetwork(networkId); if (nicCount > 0) { - s_logger.debug("The network id=" + networkId + " has active Nics, but shouldn't."); + logger.debug("The network id=" + networkId + " has active Nics, but shouldn't."); // at this point we have already determined that there are no active user vms in network // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks _networksDao.changeActiveNicsBy(networkId, (-1 * nicCount)); @@ -2280,7 +2278,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (zone.getNetworkType() == NetworkType.Basic) { List systemVms = _vmDao.listNonRemovedVmsByTypeAndNetwork(network.getId(), Type.ConsoleProxy, Type.SecondaryStorageVm); if (systemVms != null && !systemVms.isEmpty()) { - s_logger.warn("Can't delete the network, not all consoleProxy/secondaryStorage vms are expunged"); + logger.warn("Can't delete the network, not all consoleProxy/secondaryStorage vms are expunged"); return false; } } @@ -2291,13 +2289,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // get updated state for the network network = _networksDao.findById(networkId); if (network.getState() != Network.State.Allocated && network.getState() != Network.State.Setup && !forced) { - s_logger.debug("Network is not not in the correct state to be destroyed: " + network.getState()); + logger.debug("Network is not in the correct state to be destroyed: " + network.getState()); return false; } boolean success = true; if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) { - s_logger.warn("Unable to delete network id=" + networkId + ": failed to cleanup network resources"); 
+ logger.warn("Unable to delete network id=" + networkId + ": failed to cleanup network resources"); return false; } @@ -2306,30 +2304,30 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (NetworkElement element : networkElements) { if (providersToDestroy.contains(element.getProvider())) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending destroy to " + element); + if (logger.isDebugEnabled()) { + logger.debug("Sending destroy to " + element); } if (!element.destroy(network, context)) { success = false; - s_logger.warn("Unable to complete destroy of the network: failed to destroy network element " + element.getName()); + logger.warn("Unable to complete destroy of the network: failed to destroy network element " + element.getName()); } } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); success = false; } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); success = false; } catch (Exception e) { - s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); success = false; } } } if (success) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now."); + if (logger.isDebugEnabled()) { + logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now."); } final NetworkVO networkFinal = network; @@ -2342,14 +2340,14 @@ public class NetworkOrchestrator extends 
ManagerBase implements NetworkOrchestra guru.trash(networkFinal, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId())); if (!deleteVlansInNetwork(networkFinal.getId(), context.getCaller().getId(), callerAccount)) { - s_logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); + logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); throw new CloudRuntimeException("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); } else { // commit transaction only when ips and vlans for the network are released successfully try { stateTransitTo(networkFinal, Event.DestroyNetwork); } catch (NoTransitionException e) { - s_logger.debug(e.getMessage()); + logger.debug(e.getMessage()); } if (_networksDao.remove(networkFinal.getId())) { NetworkDomainVO networkDomain = _networkDomainDao.getDomainNetworkMapByNetworkId(networkFinal.getId()); @@ -2376,7 +2374,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } return true; } catch (CloudRuntimeException e) { - s_logger.error("Failed to delete network", e); + logger.error("Failed to delete network", e); return false; } } @@ -2398,7 +2396,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra boolean result = true; for (VlanVO vlan : publicVlans) { if (!_configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount)) { - s_logger.warn("Failed to delete vlan " + vlan.getId() + ");"); + logger.warn("Failed to delete vlan " + vlan.getId() + ");"); result = false; } } @@ -2406,11 +2404,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //cleanup private vlans int privateIpAllocCount = _privateIpDao.countAllocatedByNetworkId(networkId); if (privateIpAllocCount > 0) { - s_logger.warn("Can't delete Private ip range for network " + networkId + " as it has allocated ip 
addresses"); + logger.warn("Can't delete Private ip range for network " + networkId + " as it has allocated ip addresses"); result = false; } else { _privateIpDao.deleteByNetworkId(networkId); - s_logger.debug("Deleted ip range for private network id=" + networkId); + logger.debug("Deleted ip range for private network id=" + networkId); } return result; } @@ -2447,13 +2445,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra Long time = _lastNetworkIdsToFree.remove(networkId); if (time == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("We found network " + networkId + " to be free for the first time. Adding it to the list: " + currentTime); + if (logger.isDebugEnabled()) { + logger.debug("We found network " + networkId + " to be free for the first time. Adding it to the list: " + currentTime); } stillFree.put(networkId, currentTime); } else if (time > (currentTime - NetworkGcWait.value())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time); + if (logger.isDebugEnabled()) { + logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time); } stillFree.put(networkId, time); } else { @@ -2469,7 +2467,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // If network is removed, unset gc flag for it if (_networksDao.findById(networkId) == null) { - s_logger.debug("Network id=" + networkId + " is removed, so clearing up corresponding gc check"); + logger.debug("Network id=" + networkId + " is removed, so clearing up corresponding gc check"); _networksDao.clearCheckForGc(networkId); } else { try { @@ -2481,12 +2479,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra shutdownNetwork(networkId, context, false); } catch (Exception e) { - s_logger.warn("Unable to shutdown network: " + networkId); + logger.warn("Unable to shutdown network: " 
+ networkId); } } } } catch (Exception e) { - s_logger.warn("Caught exception while running network gc: ", e); + logger.warn("Caught exception while running network gc: ", e); } } } @@ -2504,10 +2502,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } // implement the network - s_logger.debug("Starting network " + network + "..."); + logger.debug("Starting network " + network + "..."); Pair implementedNetwork = implementNetwork(networkId, dest, context); if (implementedNetwork== null || implementedNetwork.first() == null) { - s_logger.warn("Failed to start the network " + network); + logger.warn("Failed to start the network " + network); return false; } else { return true; @@ -2520,27 +2518,27 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra NetworkVO network = _networksDao.findById(networkId); - s_logger.debug("Restarting network " + networkId + "..."); + logger.debug("Restarting network " + networkId + "..."); ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); if (cleanup) { // shutdown the network - s_logger.debug("Shutting down the network id=" + networkId + " as a part of network restart"); + logger.debug("Shutting down the network id=" + networkId + " as a part of network restart"); if (!shutdownNetworkElementsAndResources(context, true, network)) { - s_logger.debug("Failed to shutdown the network elements and resources as a part of network restart: " + network.getState()); + logger.debug("Failed to shutdown the network elements and resources as a part of network restart: " + network.getState()); setRestartRequired(network, true); return false; } } else { - s_logger.debug("Skip the shutting down of network id=" + networkId); + logger.debug("Skip the shutting down of network id=" + networkId); } // implement the network elements and rules again DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, 
null); - s_logger.debug("Implementing the network " + network + " elements and resources as a part of network restart"); + logger.debug("Implementing the network " + network + " elements and resources as a part of network restart"); NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId()); try { @@ -2548,13 +2546,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra setRestartRequired(network, true); return true; } catch (Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part of network restart due to ", ex); + logger.warn("Failed to implement network " + network + " elements and resources as a part of network restart due to ", ex); return false; } } private void setRestartRequired(NetworkVO network, boolean restartRequired) { - s_logger.debug("Marking network " + network + " with restartRequired=" + restartRequired); + logger.debug("Marking network " + network + " with restartRequired=" + restartRequired); network.setRestartRequired(restartRequired); _networksDao.update(network.getId(), network); } @@ -2578,7 +2576,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra String passwordProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData); if (passwordProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); + logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); return null; } @@ -2590,7 +2588,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra String SSHKeyProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData); if (SSHKeyProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); + logger.debug("Network " + network + " doesn't support service " + 
Service.UserData.getName()); return null; } @@ -2602,7 +2600,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra String DhcpProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.Dhcp); if (DhcpProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName()); + logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName()); return null; } @@ -2677,51 +2675,51 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //remove all PF/Static Nat rules for the network try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, callerUserId, caller)) { - s_logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); } else { success = false; - s_logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); } } catch (ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - s_logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); } //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, callerUserId)) { - s_logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); + logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); } else { // shouldn't even come here as 
network is being cleaned up after all network elements are shutdown success = false; - s_logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); } //revoke all firewall rules for the network try { if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) { - s_logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); + logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); } else { success = false; - s_logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); } } catch (ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - s_logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); } //revoke all network ACLs for network try { if (_networkACLMgr.revokeACLItemsForNetwork(networkId)) { - s_logger.debug("Successfully cleaned up NetworkACLs for network id=" + networkId); + logger.debug("Successfully cleaned up NetworkACLs for network id=" + networkId); } else { success = false; - s_logger.warn("Failed to cleanup NetworkACLs as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup NetworkACLs as a part of network id=" + networkId + " cleanup"); } } catch (ResourceUnavailableException ex) { success = false; - s_logger.warn("Failed to cleanup Network ACLs as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to cleanup Network ACLs as 
a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); } //release all ip addresses @@ -2736,7 +2734,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // so as part of network clean up just break IP association with guest network ipToRelease.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipToRelease.getId(), ipToRelease); - s_logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any network"); + logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any network"); } } else { _vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId()); @@ -2745,7 +2743,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra try { if (!_ipAddrMgr.applyIpAssociations(network, true)) { - s_logger.warn("Unable to apply ip address associations for " + network); + logger.warn("Unable to apply ip address associations for " + network); success = false; } } catch (ResourceUnavailableException e) { @@ -2762,34 +2760,34 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // Mark all PF rules as revoked and apply them on the backend (not in the DB) List pfRules = _portForwardingRulesDao.listByNetwork(networkId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId + " as a part of shutdownNetworkRules"); } for (PortForwardingRuleVO pfRule : pfRules) { - s_logger.trace("Marking pf rule " + pfRule + " with Revoke state"); + logger.trace("Marking pf rule " + pfRule + " with Revoke state"); pfRule.setState(FirewallRule.State.Revoke); } try { if (!_firewallMgr.applyRules(pfRules, true, false)) { - s_logger.warn("Failed to cleanup pf rules as a part 
of shutdownNetworkRules"); + logger.warn("Failed to cleanup pf rules as a part of shutdownNetworkRules"); success = false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup pf rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup pf rules as a part of shutdownNetworkRules due to ", ex); success = false; } // Mark all static rules as revoked and apply them on the backend (not in the DB) List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); List staticNatRules = new ArrayList(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + firewallStaticNatRules.size() + " static nat rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + firewallStaticNatRules.size() + " static nat rules for network id=" + networkId + " as a part of shutdownNetworkRules"); } for (FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) { - s_logger.trace("Marking static nat rule " + firewallStaticNatRule + " with Revoke state"); + logger.trace("Marking static nat rule " + firewallStaticNatRule + " with Revoke state"); IpAddress ip = _ipAddressDao.findById(firewallStaticNatRule.getSourceIpAddressId()); FirewallRuleVO ruleVO = _firewallDao.findById(firewallStaticNatRule.getId()); @@ -2804,58 +2802,58 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra try { if (!_firewallMgr.applyRules(staticNatRules, true, false)) { - s_logger.warn("Failed to cleanup static nat rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup static nat rules as a part of shutdownNetworkRules"); success = false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup static nat rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup static nat rules as a part of shutdownNetworkRules due to ", ex); 
success = false; } try { if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Public)) { - s_logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules"); success = false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules due to ", ex); success = false; } try { if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Internal)) { - s_logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules"); success = false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules due to ", ex); success = false; } // revoke all firewall rules for the network w/o applying them on the DB List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + firewallRules.size() + " firewall ingress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + firewallRules.size() + " firewall ingress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); } for (FirewallRuleVO firewallRule : firewallRules) { - s_logger.trace("Marking firewall ingress rule " + firewallRule + " with Revoke state"); + logger.trace("Marking firewall ingress rule " + firewallRule + " with Revoke state"); firewallRule.setState(FirewallRule.State.Revoke); } try { if (!_firewallMgr.applyRules(firewallRules, true, 
false)) { - s_logger.warn("Failed to cleanup firewall ingress rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup firewall ingress rules as a part of shutdownNetworkRules"); success = false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup firewall ingress rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup firewall ingress rules as a part of shutdownNetworkRules due to ", ex); success = false; } List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + firewallEgressRules.size() + " firewall egress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + firewallEgressRules.size() + " firewall egress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); } try { @@ -2868,38 +2866,38 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup firewall default egress rule as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup firewall default egress rule as a part of shutdownNetworkRules due to ", ex); success = false; } for (FirewallRuleVO firewallRule : firewallEgressRules) { - s_logger.trace("Marking firewall egress rule " + firewallRule + " with Revoke state"); + logger.trace("Marking firewall egress rule " + firewallRule + " with Revoke state"); firewallRule.setState(FirewallRule.State.Revoke); } try { if (!_firewallMgr.applyRules(firewallEgressRules, true, false)) { - s_logger.warn("Failed to cleanup firewall egress rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup firewall egress rules as a part of shutdownNetworkRules"); success = false; } } catch 
(ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup firewall egress rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup firewall egress rules as a part of shutdownNetworkRules due to ", ex); success = false; } if (network.getVpcId() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing Network ACL Items for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing Network ACL Items for network id=" + networkId + " as a part of shutdownNetworkRules"); } try { //revoke all Network ACLs for the network w/o applying them in the DB if (!_networkACLMgr.revokeACLItemsForNetwork(networkId)) { - s_logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules"); success = false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules due to ", ex); success = false; } @@ -2907,7 +2905,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //release all static nats for the network if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) { - s_logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id " + networkId); + logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id " + networkId); success = false; } @@ -2924,7 +2922,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra try { if (!_ipAddrMgr.applyIpAssociations(network, true, true, publicIpsToRelease)) { - s_logger.warn("Unable to apply ip address associations for " + network + " as a part of shutdownNetworkRules"); + logger.warn("Unable to apply ip address associations for " + 
network + " as a part of shutdownNetworkRules"); success = false; } } catch (ResourceUnavailableException e) { @@ -2974,8 +2972,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra dcId = dc.getId(); HypervisorType hypervisorType = startup.getHypervisorType(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host's hypervisorType is: " + hypervisorType); + if (logger.isDebugEnabled()) { + logger.debug("Host's hypervisorType is: " + hypervisorType); } List networkInfoList = new ArrayList(); @@ -3003,20 +3001,20 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } // send the names to the agent - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending CheckNetworkCommand to check the Network is setup correctly on Agent"); + if (logger.isDebugEnabled()) { + logger.debug("Sending CheckNetworkCommand to check the Network is setup correctly on Agent"); } CheckNetworkCommand nwCmd = new CheckNetworkCommand(networkInfoList); CheckNetworkAnswer answer = (CheckNetworkAnswer)_agentMgr.easySend(hostId, nwCmd); if (answer == null) { - s_logger.warn("Unable to get an answer to the CheckNetworkCommand from agent:" + host.getId()); + logger.warn("Unable to get an answer to the CheckNetworkCommand from agent:" + host.getId()); throw new ConnectionException(true, "Unable to get an answer to the CheckNetworkCommand from agent: " + host.getId()); } if (!answer.getResult()) { - s_logger.warn("Unable to setup agent " + hostId + " due to " + answer.getDetails() ); + logger.warn("Unable to setup agent " + hostId + " due to " + answer.getDetails() ); String msg = "Incorrect Network setup on agent, Reinitialize agent after network names are setup, details : " + answer.getDetails(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, host.getPodId(), msg, msg); throw new ConnectionException(true, msg); @@ -3024,8 +3022,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if 
(answer.needReconnect()) { throw new ConnectionException(false, "Reinitialize agent after network setup."); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network setup is correct on Agent"); + if (logger.isDebugEnabled()) { + logger.debug("Network setup is correct on Agent"); } return; } @@ -3158,18 +3156,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra VMNetworkMapVO vno = new VMNetworkMapVO(vm.getId(), network.getId()); _vmNetworkMapDao.persist(vno); } - s_logger.debug("Nic is allocated successfully for vm " + vm + " in network " + network); + logger.debug("Nic is allocated successfully for vm " + vm + " in network " + network); } //2) prepare nic if (prepare) { Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - s_logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); } nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second()); - s_logger.debug("Nic is prepared successfully for vm " + vm + " in network " + network); + logger.debug("Nic is prepared successfully for vm " + vm + " in network " + network); } return nic; @@ -3232,18 +3230,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra List providers = getProvidersForServiceInNetwork(network, service); //Only support one provider now if (providers == null) { - s_logger.error("Cannot find " + service.getName() + " provider for network " + network.getId()); + logger.error("Cannot find " + service.getName() + " provider for network " + network.getId()); return 
null; } if (providers.size() != 1 && service != Service.Lb) { //support more than one LB providers only - s_logger.error("Found " + providers.size() + " " + service.getName() + " providers for network!" + network.getId()); + logger.error("Found " + providers.size() + " " + service.getName() + " providers for network!" + network.getId()); return null; } for (Provider provider : providers) { NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName()); - s_logger.info("Let " + element.getName() + " handle " + service.getName() + " in network " + network.getId()); + logger.info("Let " + element.getName() + " handle " + service.getName() + " in network " + network.getId()); elements.add(element); } return elements; @@ -3304,7 +3302,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (NicSecondaryIpVO ip : ipList) { _nicSecondaryIpDao.remove(ip.getId()); } - s_logger.debug("Revoving nic secondary ip entry ..."); + logger.debug("Removing nic secondary ip entry ..."); } } }); diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index d407bb1afff..f793efbf6aa 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -30,7 +30,6 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; @@ -131,7 +130,6 @@ import com.cloud.vm.VmWorkTakeVolumeSnapshot; import com.cloud.vm.dao.UserVmDao; public class VolumeOrchestrator extends ManagerBase implements 
VolumeOrchestrationService, Configurable { - private static final Logger s_logger = Logger.getLogger(VolumeOrchestrator.class); @Inject EntityManager _entityMgr; @@ -343,8 +341,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati if (pool == null) { //pool could not be found in the VM's pod/cluster. - if (s_logger.isDebugEnabled()) { - s_logger.debug("Could not find any storage pool to create Volume in the pod/cluster of the provided VM " + vm.getUuid()); + if (logger.isDebugEnabled()) { + logger.debug("Could not find any storage pool to create Volume in the pod/cluster of the provided VM " + vm.getUuid()); } StringBuilder addDetails = new StringBuilder(msg); addDetails.append(", Could not find any storage pool to create Volume in the pod/cluster of the VM "); @@ -361,8 +359,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } if (pool != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a suitable pool for create volume: " + pool.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found a suitable pool for create volume: " + pool.getId()); } break; } @@ -370,7 +368,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } if (pool == null) { - s_logger.info(msg); + logger.info(msg); throw new StorageUnavailableException(msg, -1); } @@ -389,7 +387,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati _snapshotSrv.syncVolumeSnapshotsToRegionStore(snapVolId, snapStore); } catch (Exception ex) { // log but ignore the sync error to avoid any potential S3 down issue, it should be sync next time - s_logger.warn(ex.getMessage(), ex); + logger.warn(ex.getMessage(), ex); } } @@ -398,15 +396,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("Failed to create volume from snapshot:" + 
result.getResult()); + logger.debug("Failed to create volume from snapshot:" + result.getResult()); throw new CloudRuntimeException("Failed to create volume from snapshot:" + result.getResult()); } return result.getVolume(); } catch (InterruptedException e) { - s_logger.debug("Failed to create volume from snapshot", e); + logger.debug("Failed to create volume from snapshot", e); throw new CloudRuntimeException("Failed to create volume from snapshot", e); } catch (ExecutionException e) { - s_logger.debug("Failed to create volume from snapshot", e); + logger.debug("Failed to create volume from snapshot", e); throw new CloudRuntimeException("Failed to create volume from snapshot", e); } @@ -466,15 +464,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("copy volume failed: " + result.getResult()); + logger.debug("copy volume failed: " + result.getResult()); throw new CloudRuntimeException("copy volume failed: " + result.getResult()); } return result.getVolume(); } catch (InterruptedException e) { - s_logger.debug("Failed to copy volume: " + volume.getId(), e); + logger.debug("Failed to copy volume: " + volume.getId(), e); throw new CloudRuntimeException("Failed to copy volume", e); } catch (ExecutionException e) { - s_logger.debug("Failed to copy volume: " + volume.getId(), e); + logger.debug("Failed to copy volume: " + volume.getId(), e); throw new CloudRuntimeException("Failed to copy volume", e); } } @@ -504,12 +502,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati pool = findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(), vm, avoidPools); if (pool == null) { - s_logger.warn("Unable to find suitable primary storage when creating volume " + volume.getName()); + logger.warn("Unable to find suitable primary storage when creating volume " + volume.getName()); throw new CloudRuntimeException("Unable to 
find suitable primary storage when creating volume " + volume.getName()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to create " + volume + " on " + pool); + if (logger.isDebugEnabled()) { + logger.debug("Trying to create " + volume + " on " + pool); } DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); for (int i = 0; i < 2; i++) { @@ -526,20 +524,20 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati VolumeApiResult result = future.get(); if (result.isFailed()) { if (result.getResult().contains("request template reload") && (i == 0)) { - s_logger.debug("Retry template re-deploy for vmware"); + logger.debug("Retry template re-deploy for vmware"); continue; } else { - s_logger.debug("create volume failed: " + result.getResult()); + logger.debug("create volume failed: " + result.getResult()); throw new CloudRuntimeException("create volume failed:" + result.getResult()); } } return result.getVolume(); } catch (InterruptedException e) { - s_logger.error("create volume failed", e); + logger.error("create volume failed", e); throw new CloudRuntimeException("create volume failed", e); } catch (ExecutionException e) { - s_logger.error("create volume failed", e); + logger.error("create volume failed", e); throw new CloudRuntimeException("create volume failed", e); } } @@ -674,10 +672,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati if (rootDisksize != null ) { rootDisksize = rootDisksize * 1024 * 1024 * 1024; if (rootDisksize > size) { - s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name); + logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name); size = rootDisksize; } else { - s_logger.debug("Using root disk size of " + size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template"); + logger.debug("Using root disk size of " + 
size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template"); } } @@ -818,8 +816,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati Long volTemplateId = existingVolume.getTemplateId(); long vmTemplateId = vm.getTemplateId(); if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId + if (logger.isDebugEnabled()) { + logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId + ", updating templateId in the new Volume"); } templateIdToUse = vmTemplateId; @@ -833,16 +831,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { stateTransitTo(existingVolume, Volume.Event.DestroyRequested); } catch (NoTransitionException e) { - s_logger.debug("Unable to destroy existing volume: " + e.toString()); + logger.debug("Unable to destroy existing volume: " + e.toString()); } // In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk if (vm.getHypervisorType() == HypervisorType.VMware) { - s_logger.info("Expunging volume " + existingVolume.getId() + " from primary data store"); + logger.info("Expunging volume " + existingVolume.getId() + " from primary data store"); AsyncCallFuture future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId())); try { future.get(); } catch (Exception e) { - s_logger.debug("Failed to expunge volume:" + existingVolume.getId(), e); + logger.debug("Failed to expunge volume:" + existingVolume.getId(), e); } } @@ -859,8 +857,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati @Override @DB public void cleanupVolumes(long vmId) throws ConcurrentOperationException { - 
if (s_logger.isDebugEnabled()) { - s_logger.debug("Cleaning storage for vm: " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Cleaning storage for vm: " + vmId); } final List volumesForVm = _volsDao.findByInstance(vmId); final List toBeExpunged = new ArrayList(); @@ -875,12 +873,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati if (!volumeAlreadyDestroyed) { volService.destroyVolume(vol.getId()); } else { - s_logger.debug("Skipping destroy for the volume " + vol + " as its in state " + vol.getState().toString()); + logger.debug("Skipping destroy for the volume " + vol + " as its in state " + vol.getState().toString()); } toBeExpunged.add(vol); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detaching " + vol); + if (logger.isDebugEnabled()) { + logger.debug("Detaching " + vol); } _volsDao.detachVolume(vol.getId()); } @@ -894,9 +892,9 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { future.get(); } catch (InterruptedException e) { - s_logger.debug("failed expunge volume" + expunge.getId(), e); + logger.debug("failed expunge volume" + expunge.getId(), e); } catch (ExecutionException e) { - s_logger.debug("failed expunge volume" + expunge.getId(), e); + logger.debug("failed expunge volume" + expunge.getId(), e); } } } @@ -938,7 +936,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.error("Migrate volume failed:" + result.getResult()); + logger.error("Migrate volume failed:" + result.getResult()); throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId()); } else { // update the volumeId for snapshots on secondary @@ -949,10 +947,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } return result.getVolume(); } catch (InterruptedException e) { - s_logger.debug("migrate volume 
failed", e); + logger.debug("migrate volume failed", e); throw new CloudRuntimeException(e.getMessage()); } catch (ExecutionException e) { - s_logger.debug("migrate volume failed", e); + logger.debug("migrate volume failed", e); throw new CloudRuntimeException(e.getMessage()); } } @@ -964,15 +962,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("migrate volume failed:" + result.getResult()); + logger.debug("migrate volume failed:" + result.getResult()); return null; } return result.getVolume(); } catch (InterruptedException e) { - s_logger.debug("migrate volume failed", e); + logger.debug("migrate volume failed", e); return null; } catch (ExecutionException e) { - s_logger.debug("migrate volume failed", e); + logger.debug("migrate volume failed", e); return null; } } @@ -1003,13 +1001,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { CommandResult result = future.get(); if (result.isFailed()) { - s_logger.debug("Failed to migrated vm " + vm + " along with its volumes. " + result.getResult()); + logger.debug("Failed to migrated vm " + vm + " along with its volumes. " + result.getResult()); throw new CloudRuntimeException("Failed to migrated vm " + vm + " along with its volumes. 
"); } } catch (InterruptedException e) { - s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e); + logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e); } catch (ExecutionException e) { - s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e); + logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e); } } @@ -1020,12 +1018,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati for (VolumeVO volume : vols) { if (volume.getState() != Volume.State.Ready) { - s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state"); + logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state"); throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); } if (volume.getPoolId() == destPool.getId()) { - s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId()); + logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId()); continue; } @@ -1033,7 +1031,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } if (volumesNeedToMigrate.isEmpty()) { - s_logger.debug("No volume need to be migrated"); + logger.debug("No volume need to be migrated"); return true; } @@ -1049,8 +1047,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati @Override public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) { List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Preparing " + vols.size() + " volumes for " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Preparing " + vols.size() + " volumes for " + vm); } for (VolumeVO vol : vols) { @@ -1138,21 +1136,21 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati tasks.add(task); } 
else { if (vol.isRecreatable()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner"); + if (logger.isDebugEnabled()) { + logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner"); } VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); tasks.add(task); } else { if (assignedPool.getId() != vol.getPoolId()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol); + if (logger.isDebugEnabled()) { + logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol); } DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId()); if (diskOffering.getUseLocalStorage()) { // Currently migration of local volume is not supported so bail out - if (s_logger.isDebugEnabled()) { - s_logger.debug("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner"); + if (logger.isDebugEnabled()) { + logger.debug("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner"); } throw new CloudRuntimeException("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner"); } else { @@ -1165,8 +1163,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati storageMigrationEnabled = StorageMigrationEnabled.value(); } if(storageMigrationEnabled){ - if (s_logger.isDebugEnabled()) { - s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner"); + if (logger.isDebugEnabled()) { + logger.debug("Shared volume " + vol + " will be migrated on storage pool 
" + assignedPool + " assigned by deploymentPlanner"); } VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool); tasks.add(task); @@ -1187,8 +1185,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol, Volume.class, vol.getId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM"); + if (logger.isDebugEnabled()) { + logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM"); } StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool); @@ -1205,7 +1203,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati DataStore destPool = null; if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) { destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); - s_logger.debug("existing pool: " + destPool.getId()); + logger.debug("existing pool: " + destPool.getId()); } else { StoragePool pool = dest.getStorageForDisks().get(vol); destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); @@ -1222,8 +1220,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati dest.getStorageForDisks().put(newVol, poolWithOldVol); dest.getStorageForDisks().remove(vol); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created new volume " + newVol + " for old volume " + vol); + if (logger.isDebugEnabled()) { + logger.debug("Created new volume " + newVol + " for old volume " + vol); } } VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool); @@ -1245,7 +1243,7 @@ 
public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId()); if (templ == null) { - s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); + logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); } @@ -1271,10 +1269,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati result = future.get(); if (result.isFailed()) { if (result.getResult().contains("request template reload") && (i == 0)) { - s_logger.debug("Retry template re-deploy for vmware"); + logger.debug("Retry template re-deploy for vmware"); continue; } else { - s_logger.debug("Unable to create " + newVol + ":" + result.getResult()); + logger.debug("Unable to create " + newVol + ":" + result.getResult()); throw new StorageUnavailableException("Unable to create " + newVol + ":" + result.getResult(), destPool.getId()); } } @@ -1291,10 +1289,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati newVol = _volsDao.findById(newVol.getId()); break; //break out of template-redeploy retry loop } catch (InterruptedException e) { - s_logger.error("Unable to create " + newVol, e); + logger.error("Unable to create " + newVol, e); throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId()); } catch (ExecutionException e) { - s_logger.error("Unable to create " + newVol, e); + logger.error("Unable to create " + newVol, e); throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId()); } } @@ -1306,8 +1304,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati public void 
prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException { if (dest == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm); } throw new CloudRuntimeException("Unable to prepare Volume for vm because DeployDestination is null, vm:" + vm); } @@ -1318,8 +1316,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm); } List tasks = getTasks(vols, dest.getStorageForDisks(), vm); @@ -1398,7 +1396,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } if (volume.getState().equals(Volume.State.Creating)) { - s_logger.debug("Remove volume: " + volume.getId() + ", as it's leftover from last mgt server stop"); + logger.debug("Remove volume: " + volume.getId() + ", as it's leftover from last mgt server stop"); _volsDao.remove(volume.getId()); } } @@ -1413,11 +1411,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati if (volume.getState() == Volume.State.Migrating) { VolumeVO duplicateVol = _volsDao.findByPoolIdName(destPoolId, volume.getName()); if (duplicateVol != null) { - s_logger.debug("Remove volume " + duplicateVol.getId() + " on storage pool " + destPoolId); + logger.debug("Remove volume " + duplicateVol.getId() + " on storage pool " + destPoolId); _volsDao.remove(duplicateVol.getId()); } - s_logger.debug("change volume state to ready from migrating in case 
migration failure for vol: " + volumeId); + logger.debug("change volume state to ready from migrating in case migration failure for vol: " + volumeId); volume.setState(Volume.State.Ready); _volsDao.update(volumeId, volume); } @@ -1428,7 +1426,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati _snapshotSrv.cleanupVolumeDuringSnapshotFailure(volumeId, snapshotId); VolumeVO volume = _volsDao.findById(volumeId); if (volume.getState() == Volume.State.Snapshotting) { - s_logger.debug("change volume state back to Ready: " + volume.getId()); + logger.debug("change volume state back to Ready: " + volume.getId()); volume.setState(Volume.State.Ready); _volsDao.update(volume.getId(), volume); } @@ -1453,7 +1451,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati cleanupVolumeDuringSnapshotFailure(work.getVolumeId(), work.getSnapshotId()); } } catch (Exception e) { - s_logger.debug("clean up job failure, will continue", e); + logger.debug("clean up job failure, will continue", e); } } } @@ -1486,7 +1484,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati //FIXME - why recalculate and not decrement _resourceLimitMgr.recalculateResourceCount(volume.getAccountId(), volume.getDomainId(), ResourceType.primary_storage.getOrdinal()); } catch (Exception e) { - s_logger.debug("Failed to destroy volume" + volume.getId(), e); + logger.debug("Failed to destroy volume" + volume.getId(), e); throw new CloudRuntimeException("Failed to destroy volume" + volume.getId(), e); } } @@ -1517,7 +1515,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati needUpdate = true; if (needUpdate) { - s_logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo); + logger.info("Update volume disk chain info. 
vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo); vol.setPath(path); vol.setChainInfo(chainInfo); _volsDao.update(volumeId, vol); diff --git a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java index 2bd6bcc4863..946e08c3fe8 100644 --- a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -28,7 +28,6 @@ import java.util.Map.Entry; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -52,7 +51,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {CapacityDao.class}) public class CapacityDaoImpl extends GenericDaoBase implements CapacityDao { - private static final Logger s_logger = Logger.getLogger(CapacityDaoImpl.class); private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? 
AND capacity_type = ?"; private static final String SUBTRACT_ALLOCATED_SQL = @@ -523,7 +521,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Exception updating capacity for host: " + hostId, e); + logger.warn("Exception updating capacity for host: " + hostId, e); } } @@ -988,7 +986,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements } pstmt.executeUpdate(); } catch (Exception e) { - s_logger.warn("Error updating CapacityVO", e); + logger.warn("Error updating CapacityVO", e); } } @@ -1008,7 +1006,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements return rs.getFloat(1); } } catch (Exception e) { - s_logger.warn("Error checking cluster threshold", e); + logger.warn("Error checking cluster threshold", e); } return 0; } diff --git a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java index 978fee04401..d6d944deb6d 100644 --- a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java +++ b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java @@ -18,7 +18,6 @@ package com.cloud.certificate.dao; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.certificate.CertificateVO; @@ -30,7 +29,6 @@ import com.cloud.utils.db.GenericDaoBase; @DB public class CertificateDaoImpl extends GenericDaoBase implements CertificateDao { - private static final Logger s_logger = Logger.getLogger(CertificateDaoImpl.class); public CertificateDaoImpl() { @@ -44,7 +42,7 @@ public class CertificateDaoImpl extends GenericDaoBase impl update(cert.getId(), cert); return cert.getId(); } catch (Exception e) { - s_logger.warn("Unable to read the certificate: " + e); + logger.warn("Unable to read the certificate: " + e); return new Long(0); } } diff --git 
a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java index 483ea45172a..493ee48a947 100644 --- a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java +++ b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; import javax.annotation.PostConstruct; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.cluster.agentlb.HostTransferMapVO; @@ -36,7 +35,6 @@ import com.cloud.utils.db.SearchCriteria; @Local(value = {HostTransferMapDao.class}) @DB public class HostTransferMapDaoImpl extends GenericDaoBase implements HostTransferMapDao { - private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class); protected SearchBuilder AllFieldsSearch; protected SearchBuilder IntermediateStateSearch; diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java index 373446e780a..5c216e78ed1 100644 --- a/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import javax.persistence.TableGenerator; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterDetailVO; @@ -58,7 +57,6 @@ import com.cloud.utils.net.NetUtils; @Component @Local(value = {DataCenterDao.class}) public class DataCenterDaoImpl extends GenericDaoBase implements DataCenterDao { - private static final Logger s_logger = Logger.getLogger(DataCenterDaoImpl.class); protected SearchBuilder NameSearch; protected SearchBuilder ListZonesByDomainIdSearch; @@ -412,7 +410,7 @@ public class DataCenterDaoImpl extends GenericDaoBase implem Long dcId = 
Long.parseLong(tokenOrIdOrName); return findById(dcId); } catch (NumberFormatException nfe) { - s_logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe); + logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe); } } } diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java index ca79eedd529..21501c58db9 100644 --- a/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterIpAddressVO; @@ -41,7 +40,6 @@ import com.cloud.utils.net.NetUtils; @Local(value = {DataCenterIpAddressDao.class}) @DB public class DataCenterIpAddressDaoImpl extends GenericDaoBase implements DataCenterIpAddressDao { - private static final Logger s_logger = Logger.getLogger(DataCenterIpAddressDaoImpl.class); private final SearchBuilder AllFieldsSearch; private final GenericSearchBuilder AllIpCount; @@ -144,8 +142,8 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); sc.setParameters("ip", ipAddress); @@ -162,8 +160,8 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase implements DataCenterLinkLocalIpAddressDao { - private static final Logger s_logger = Logger.getLogger(DataCenterLinkLocalIpAddressDaoImpl.class); private final SearchBuilder AllFieldsSearch; private final GenericSearchBuilder AllIpCount; @@ -107,8 +105,8 @@ public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); sc.setParameters("ip", ipAddress); diff --git a/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java index 1137eb80ca6..0dd943c7ab3 100644 --- 
a/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java @@ -25,7 +25,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.HostPodVO; @@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {HostPodDao.class}) public class HostPodDaoImpl extends GenericDaoBase implements HostPodDao { - private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class); protected SearchBuilder DataCenterAndNameSearch; protected SearchBuilder DataCenterIdSearch; @@ -102,7 +100,7 @@ public class HostPodDaoImpl extends GenericDaoBase implements H currentPodCidrSubnets.put(podId, cidrPair); } } catch (SQLException ex) { - s_logger.warn("DB exception " + ex.getMessage(), ex); + logger.warn("DB exception " + ex.getMessage(), ex); return null; } diff --git a/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java b/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java index 223172e0d67..ca2a0b560dc 100644 --- a/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java +++ b/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java @@ -25,7 +25,6 @@ import java.util.Set; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.domain.Domain; @@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {DomainDao.class}) public class DomainDaoImpl extends GenericDaoBase implements DomainDao { - private static final Logger s_logger = Logger.getLogger(DomainDaoImpl.class); protected SearchBuilder DomainNameLikeSearch; protected SearchBuilder ParentDomainNameLikeSearch; @@ -112,7 +110,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom DomainVO parentDomain = findById(parent); if (parentDomain == null) { - s_logger.error("Unable to load parent domain: " + parent); + 
logger.error("Unable to load parent domain: " + parent); return null; } @@ -122,7 +120,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom parentDomain = this.lockRow(parent, true); if (parentDomain == null) { - s_logger.error("Unable to lock parent domain: " + parent); + logger.error("Unable to lock parent domain: " + parent); return null; } @@ -137,7 +135,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom txn.commit(); return domain; } catch (Exception e) { - s_logger.error("Unable to create domain due to " + e.getMessage(), e); + logger.error("Unable to create domain due to " + e.getMessage(), e); txn.rollback(); return null; } @@ -148,23 +146,23 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom public boolean remove(Long id) { // check for any active users / domains assigned to the given domain id and don't remove the domain if there are any if (id != null && id.longValue() == Domain.ROOT_DOMAIN) { - s_logger.error("Can not remove domain " + id + " as it is ROOT domain"); + logger.error("Can not remove domain " + id + " as it is ROOT domain"); return false; } else { if(id == null) { - s_logger.error("Can not remove domain without id."); + logger.error("Can not remove domain without id."); return false; } } DomainVO domain = findById(id); if (domain == null) { - s_logger.info("Unable to remove domain as domain " + id + " no longer exists"); + logger.info("Unable to remove domain as domain " + id + " no longer exists"); return true; } if (domain.getParent() == null) { - s_logger.error("Invalid domain " + id + ", orphan?"); + logger.error("Invalid domain " + id + ", orphan?"); return false; } @@ -177,7 +175,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom txn.start(); DomainVO parentDomain = super.lockRow(domain.getParent(), true); if (parentDomain == null) { - s_logger.error("Unable to load parent domain: " + domain.getParent()); + logger.error("Unable to load parent domain: " + 
domain.getParent()); return false; } @@ -198,7 +196,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom txn.commit(); } catch (SQLException ex) { success = false; - s_logger.error("error removing domain: " + id, ex); + logger.error("error removing domain: " + id, ex); txn.rollback(); } return success; diff --git a/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java b/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java index be589e7649b..3ad5fe07d13 100644 --- a/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java +++ b/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.event.Event.State; @@ -36,7 +35,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {EventDao.class}) public class EventDaoImpl extends GenericDaoBase implements EventDao { - public static final Logger s_logger = Logger.getLogger(EventDaoImpl.class.getName()); protected final SearchBuilder CompletedEventSearch; protected final SearchBuilder ToArchiveOrDeleteEventSearch; diff --git a/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java b/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java index 1739254bd8b..8134ea8af05 100644 --- a/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java +++ b/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java @@ -26,7 +26,6 @@ import java.util.TimeZone; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.Vlan; @@ -44,7 +43,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {UsageEventDao.class}) public class UsageEventDaoImpl extends GenericDaoBase implements UsageEventDao { - public static final Logger s_logger = Logger.getLogger(UsageEventDaoImpl.class.getName()); private final 
SearchBuilder latestEventsSearch; private final SearchBuilder IpeventsSearch; @@ -103,8 +101,8 @@ public class UsageEventDaoImpl extends GenericDaoBase implem // Copy events from cloud db to usage db String sql = COPY_EVENTS; if (recentEventId == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("no recent event date, copying all events"); + if (logger.isDebugEnabled()) { + logger.debug("no recent event date, copying all events"); } sql = COPY_ALL_EVENTS; } @@ -122,7 +120,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error copying events from cloud db to usage db", ex); + logger.error("error copying events from cloud db to usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); @@ -131,8 +129,8 @@ public class UsageEventDaoImpl extends GenericDaoBase implem // Copy event details from cloud db to usage db sql = COPY_EVENT_DETAILS; if (recentEventId == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("no recent event date, copying all event detailss"); + if (logger.isDebugEnabled()) { + logger.debug("no recent event date, copying all event detailss"); } sql = COPY_ALL_EVENT_DETAILS; } @@ -150,7 +148,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error copying event details from cloud db to usage db", ex); + logger.error("error copying event details from cloud db to usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); @@ -173,7 +171,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem } return 0; } catch (Exception ex) { - s_logger.error("error getting most recent event id", ex); + logger.error("error getting most recent event id", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); @@ -185,7 +183,7 @@ public class UsageEventDaoImpl extends GenericDaoBase 
implem try { return listLatestEvents(endDate); } catch (Exception ex) { - s_logger.error("error getting most recent event date", ex); + logger.error("error getting most recent event date", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); @@ -205,7 +203,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem } return 0; } catch (Exception ex) { - s_logger.error("error getting max event id", ex); + logger.error("error getting max event id", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); diff --git a/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java b/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java index 35d77c12b08..583ba79cd16 100644 --- a/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java @@ -21,7 +21,6 @@ import java.util.Map; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.event.UsageEventDetailsVO; @@ -33,7 +32,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {UsageEventDetailsDao.class}) public class UsageEventDetailsDaoImpl extends GenericDaoBase implements UsageEventDetailsDao { - public static final Logger s_logger = Logger.getLogger(UsageEventDetailsDaoImpl.class.getName()); protected final SearchBuilder EventDetailsSearch; protected final SearchBuilder DetailSearch; diff --git a/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java b/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java index 6bddea262ed..669a18b7a75 100644 --- a/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java +++ b/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.gpu.HostGpuGroupsVO; @@ 
-32,7 +31,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = HostGpuGroupsDao.class) public class HostGpuGroupsDaoImpl extends GenericDaoBase implements HostGpuGroupsDao { - private static final Logger s_logger = Logger.getLogger(HostGpuGroupsDaoImpl.class); private final SearchBuilder _hostIdGroupNameSearch; private final SearchBuilder _searchByHostId; diff --git a/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java b/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java index 96e3a6277f8..6fb774ccf1b 100644 --- a/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java +++ b/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java @@ -28,7 +28,6 @@ import java.util.Map.Entry; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.VgpuTypesInfo; @@ -43,7 +42,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = VGPUTypesDao.class) public class VGPUTypesDaoImpl extends GenericDaoBase implements VGPUTypesDao { - private static final Logger s_logger = Logger.getLogger(VGPUTypesDaoImpl.class); private final SearchBuilder _searchByGroupId; private final SearchBuilder _searchByGroupIdVGPUType; diff --git a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java index 8342f1fcf77..578a19d7dfb 100644 --- a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java @@ -31,7 +31,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.persistence.TableGenerator; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.VgpuTypesInfo; @@ -71,9 +70,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @DB @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 
1) public class HostDaoImpl extends GenericDaoBase implements HostDao { //FIXME: , ExternalIdDao { - private static final Logger s_logger = Logger.getLogger(HostDaoImpl.class); - private static final Logger status_logger = Logger.getLogger(Status.class); - private static final Logger state_logger = Logger.getLogger(ResourceState.class); protected SearchBuilder TypePodDcStatusSearch; @@ -289,7 +285,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao try { HostTransferSearch = _hostTransferDao.createSearchBuilder(); } catch (Throwable e) { - s_logger.debug("error", e); + logger.debug("error", e); } HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL); UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(), @@ -445,8 +441,8 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sb.append(" "); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Following hosts got reset: " + sb.toString()); + if (logger.isTraceEnabled()) { + logger.trace("Following hosts got reset: " + sb.toString()); } } @@ -505,19 +501,19 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Resetting hosts suitable for reconnect"); + if (logger.isDebugEnabled()) { + logger.debug("Resetting hosts suitable for reconnect"); } // reset hosts that are suitable candidates for reconnect resetHosts(managementServerId, lastPingSecondsAfter); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed resetting hosts suitable for reconnect"); + if (logger.isDebugEnabled()) { + logger.debug("Completed resetting hosts suitable for reconnect"); } List assignedHosts = new ArrayList(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Acquiring hosts for clusters already owned by this management 
server"); + if (logger.isDebugEnabled()) { + logger.debug("Acquiring hosts for clusters already owned by this management server"); } List clusters = findClustersOwnedByManagementServer(managementServerId); if (clusters.size() > 0) { @@ -535,17 +531,17 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sb.append(host.getId()); sb.append(" "); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString()); + if (logger.isTraceEnabled()) { + logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString()); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed acquiring hosts for clusters already owned by this management server"); + if (logger.isDebugEnabled()) { + logger.debug("Completed acquiring hosts for clusters already owned by this management server"); } if (assignedHosts.size() < limit) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Acquiring hosts for clusters not owned by any management server"); + if (logger.isDebugEnabled()) { + logger.debug("Acquiring hosts for clusters not owned by any management server"); } // for remaining hosts not owned by any MS check if they can be owned (by owning full cluster) clusters = findClustersForHostsNotOwnedByAnyManagementServer(); @@ -585,12 +581,12 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao break; } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString()); + if (logger.isTraceEnabled()) { + logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString()); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed acquiring hosts for clusters not owned by any management server"); + if (logger.isDebugEnabled()) { + logger.debug("Completed acquiring hosts for clusters not owned by any management server"); } } txn.commit(); @@ -754,7 +750,7 @@ public class 
HostDaoImpl extends GenericDaoBase implements HostDao } } } catch (SQLException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); } return result; } @@ -865,15 +861,15 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao l.add(info); } } catch (SQLException e) { - s_logger.debug("SQLException caught", e); + logger.debug("SQLException caught", e); } return l; } @Override public long getNextSequence(long hostId) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("getNextSequence(), hostId: " + hostId); + if (logger.isTraceEnabled()) { + logger.trace("getNextSequence(), hostId: " + hostId); } TableGenerator tg = _tgs.get("host_req_sq"); @@ -953,7 +949,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostVO ho = findById(host.getId()); assert ho != null : "How how how? : " + host.getId(); - if (status_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString()); str.append(". 
Name=").append(host.getName()); @@ -975,7 +971,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao .append(":old update count=") .append(oldUpdateCount) .append("]"); - status_logger.debug(str.toString()); + logger.debug(str.toString()); } else { StringBuilder msg = new StringBuilder("Agent status update: ["); msg.append("id = " + host.getId()); @@ -985,11 +981,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao msg.append("; new status = " + newStatus); msg.append("; old update count = " + oldUpdateCount); msg.append("; new update count = " + newUpdateCount + "]"); - status_logger.debug(msg.toString()); + logger.debug(msg.toString()); } if (ho.getState() == newStatus) { - status_logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus); + logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus); return true; } } @@ -1015,7 +1011,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao int result = update(ub, sc, null); assert result <= 1 : "How can this update " + result + " rows? "; - if (state_logger.isDebugEnabled() && result == 0) { + if (logger.isDebugEnabled() && result == 0) { HostVO ho = findById(host.getId()); assert ho != null : "How how how? 
: " + host.getId(); @@ -1025,7 +1021,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao str.append("; old state = " + oldState); str.append("; event = " + event); str.append("; new state = " + newState + "]"); - state_logger.debug(str.toString()); + logger.debug(str.toString()); } else { StringBuilder msg = new StringBuilder("Resource state update: ["); msg.append("id = " + host.getId()); @@ -1033,7 +1029,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao msg.append("; old state = " + oldState); msg.append("; event = " + event); msg.append("; new state = " + newState + "]"); - state_logger.debug(msg.toString()); + logger.debug(msg.toString()); } return result > 0; diff --git a/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java b/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java index 2b94e69737d..b8f07cbf453 100644 --- a/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java +++ b/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -33,7 +32,6 @@ import com.cloud.utils.db.SearchCriteria; @Local(value = HypervisorCapabilitiesDao.class) public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase implements HypervisorCapabilitiesDao { - private static final Logger s_logger = Logger.getLogger(HypervisorCapabilitiesDaoImpl.class); protected final SearchBuilder HypervisorTypeSearch; protected final SearchBuilder HypervisorTypeAndVersionSearch; diff --git a/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java b/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java index e89536b990c..35be85b2d90 100644 --- a/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java +++ 
b/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -33,7 +32,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = FirewallRulesCidrsDao.class) public class FirewallRulesCidrsDaoImpl extends GenericDaoBase implements FirewallRulesCidrsDao { - private static final Logger s_logger = Logger.getLogger(FirewallRulesCidrsDaoImpl.class); protected final SearchBuilder CidrsSearch; protected FirewallRulesCidrsDaoImpl() { diff --git a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java index 5122876adf8..460630be11d 100644 --- a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java @@ -26,7 +26,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.Vlan.VlanType; @@ -50,7 +49,6 @@ import com.cloud.utils.net.Ip; @Local(value = {IPAddressDao.class}) @DB public class IPAddressDaoImpl extends GenericDaoBase implements IPAddressDao { - private static final Logger s_logger = Logger.getLogger(IPAddressDaoImpl.class); protected SearchBuilder AllFieldsSearch; protected SearchBuilder VlanDbIdSearchUnallocated; @@ -322,7 +320,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen ipCount = rs.getInt(1); } } catch (Exception e) { - s_logger.warn("Exception counting IP addresses", e); + logger.warn("Exception counting IP addresses", e); } return ipCount; diff --git a/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java b/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java index 211280dbe1f..5a81947e122 100644 --- 
a/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -38,7 +37,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Local(value = PortProfileDao.class) @DB() public class PortProfileDaoImpl extends GenericDaoBase implements PortProfileDao { - protected static final Logger s_logger = Logger.getLogger(PortProfileDaoImpl.class); final SearchBuilder nameSearch; final SearchBuilder accessVlanSearch; diff --git a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java index 965d4337f59..34d96b94ee9 100644 --- a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.RemoteAccessVpn; @@ -31,7 +30,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {RemoteAccessVpnDao.class}) public class RemoteAccessVpnDaoImpl extends GenericDaoBase implements RemoteAccessVpnDao { - private static final Logger s_logger = Logger.getLogger(RemoteAccessVpnDaoImpl.class); private final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java b/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java index e32533f6a5c..8329f3de4de 100644 --- a/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java @@ -22,7 +22,6 @@ import javax.annotation.PostConstruct; import javax.ejb.Local; import 
javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -33,7 +32,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {Site2SiteVpnConnectionDao.class}) public class Site2SiteVpnConnectionDaoImpl extends GenericDaoBase implements Site2SiteVpnConnectionDao { - private static final Logger s_logger = Logger.getLogger(Site2SiteVpnConnectionDaoImpl.class); @Inject protected IPAddressDao _addrDao; diff --git a/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java b/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java index 658ca0ab407..7951b055b5e 100644 --- a/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java @@ -19,7 +19,6 @@ package com.cloud.network.dao; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -32,7 +31,6 @@ public class Site2SiteVpnGatewayDaoImpl extends GenericDaoBase AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java b/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java index f0ba19993df..c0ba455da5e 100644 --- a/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.UserIpv6AddressVO; @@ -34,7 +33,6 @@ import com.cloud.utils.db.SearchCriteria.Op; @Component @Local(value = UserIpv6AddressDao.class) public class UserIpv6AddressDaoImpl extends GenericDaoBase implements UserIpv6AddressDao { - private static final Logger s_logger = 
Logger.getLogger(IPAddressDaoImpl.class); protected final SearchBuilder AllFieldsSearch; protected GenericSearchBuilder CountFreePublicIps; diff --git a/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java b/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java index 8fe9375c051..e430910c1b9 100644 --- a/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java +++ b/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java @@ -27,7 +27,6 @@ import java.util.Set; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.security.VmRulesetLogVO; @@ -39,7 +38,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {VmRulesetLogDao.class}) public class VmRulesetLogDaoImpl extends GenericDaoBase im } catch (SQLTransactionRollbackException e1) { if (i < maxTries - 1) { int delayMs = (i + 1) * 1000; - s_logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs); + logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs); try { Thread.sleep(delayMs); } catch (InterruptedException ie) { - s_logger.debug("[ignored] interupted while inserting security group rule log."); + logger.debug("[ignored] interupted while inserting security group rule log."); } } else - s_logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up"); + logger.warn("Caught another deadlock
exception while retrying inserting security group rule log, giving up"); } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Inserted or updated " + numUpdated + " rows"); + if (logger.isTraceEnabled()) { + logger.trace("Inserted or updated " + numUpdated + " rows"); } return numUpdated; } @@ -136,8 +134,8 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase im vmIds.add(vmId); } int numUpdated = executeWithRetryOnDeadlock(txn, pstmt, vmIds); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Inserted or updated " + numUpdated + " rows"); + if (logger.isTraceEnabled()) { + logger.trace("Inserted or updated " + numUpdated + " rows"); } if (numUpdated > 0) count += stmtSize; @@ -147,7 +145,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase im } } catch (SQLException sqe) { - s_logger.warn("Failed to execute multi insert ", sqe); + logger.warn("Failed to execute multi insert ", sqe); } return count; @@ -175,10 +173,10 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase im queryResult = stmtInsert.executeBatch(); txn.commit(); - if (s_logger.isTraceEnabled()) - s_logger.trace("Updated or inserted " + workItems.size() + " log items"); + if (logger.isTraceEnabled()) + logger.trace("Updated or inserted " + workItems.size() + " log items"); } catch (SQLException e) { - s_logger.warn("Failed to execute batch update statement for ruleset log: ", e); + logger.warn("Failed to execute batch update statement for ruleset log: ", e); txn.rollback(); success = false; } @@ -187,7 +185,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase im workItems.toArray(arrayItems); for (int i = 0; i < queryResult.length; i++) { if (queryResult[i] < 0) { - s_logger.debug("Batch query update failed for vm " + arrayItems[i]); + logger.debug("Batch query update failed for vm " + arrayItems[i]); } } } diff --git a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java 
b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java index 4c2574e6094..dd28fe3a8c6 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.vpc.NetworkACLItemCidrsDao; @@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = NetworkACLItemCidrsDao.class) public class NetworkACLItemCidrsDaoImpl extends GenericDaoBase implements NetworkACLItemCidrsDao { - private static final Logger s_logger = Logger.getLogger(NetworkACLItemCidrsDaoImpl.class); protected final SearchBuilder cidrsSearch; protected NetworkACLItemCidrsDaoImpl() { diff --git a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java index 201197c93f2..d6008685f8a 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.vpc.NetworkACLItem.State; @@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy; @Local(value = NetworkACLItemDao.class) @DB() public class NetworkACLItemDaoImpl extends GenericDaoBase implements NetworkACLItemDao { - private static final Logger s_logger = Logger.getLogger(NetworkACLItemDaoImpl.class); protected final SearchBuilder AllFieldsSearch; protected final SearchBuilder NotRevokedSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java index 
21784529033..a276890f5ad 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.vpc.PrivateIpVO; @@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy; @Local(value = PrivateIpDao.class) @DB() public class PrivateIpDaoImpl extends GenericDaoBase implements PrivateIpDao { - private static final Logger s_logger = Logger.getLogger(PrivateIpDaoImpl.class); private final SearchBuilder AllFieldsSearch; private final GenericSearchBuilder CountAllocatedByNetworkId; @@ -92,8 +90,8 @@ public class PrivateIpDaoImpl extends GenericDaoBase implemen @Override public void releaseIpAddress(String ipAddress, long networkId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing private ip address: " + ipAddress + " network id " + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing private ip address: " + ipAddress + " network id " + networkId); } SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("ip", ipAddress); diff --git a/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java b/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java index 2ecf3fb8091..7565041b1aa 100644 --- a/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java +++ b/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.projects.ProjectAccount; @@ -39,7 +38,6 @@ public class ProjectAccountDaoImpl extends GenericDaoBase AdminSearch; final GenericSearchBuilder ProjectAccountSearch; final GenericSearchBuilder CountByRoleSearch; - public static final Logger s_logger = 
Logger.getLogger(ProjectAccountDaoImpl.class.getName()); protected ProjectAccountDaoImpl() { AllFieldsSearch = createSearchBuilder(); @@ -150,7 +148,7 @@ public class ProjectAccountDaoImpl extends GenericDaoBase 0) { - s_logger.debug("Removed account id=" + accountId + " from " + rowsRemoved + " projects"); + logger.debug("Removed account id=" + accountId + " from " + rowsRemoved + " projects"); } } diff --git a/engine/schema/src/com/cloud/projects/dao/ProjectDaoImpl.java b/engine/schema/src/com/cloud/projects/dao/ProjectDaoImpl.java index b2cf2d899f6..18f564f4911 100644 --- a/engine/schema/src/com/cloud/projects/dao/ProjectDaoImpl.java +++ b/engine/schema/src/com/cloud/projects/dao/ProjectDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.projects.Project; @@ -39,7 +38,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {ProjectDao.class}) public class ProjectDaoImpl extends GenericDaoBase implements ProjectDao { - private static final Logger s_logger = Logger.getLogger(ProjectDaoImpl.class); protected final SearchBuilder AllFieldsSearch; protected GenericSearchBuilder CountByDomain; protected GenericSearchBuilder ProjectAccountSearch; @@ -79,7 +77,7 @@ public class ProjectDaoImpl extends GenericDaoBase implements P ProjectVO projectToRemove = findById(projectId); projectToRemove.setName(null); if (!update(projectId, projectToRemove)) { - s_logger.warn("Failed to reset name for the project id=" + projectId + " as a part of project remove"); + logger.warn("Failed to reset name for the project id=" + projectId + " as a part of project remove"); return false; } diff --git a/engine/schema/src/com/cloud/projects/dao/ProjectInvitationDaoImpl.java b/engine/schema/src/com/cloud/projects/dao/ProjectInvitationDaoImpl.java index 0dff4af525a..16112c496d2 100644 --- 
a/engine/schema/src/com/cloud/projects/dao/ProjectInvitationDaoImpl.java +++ b/engine/schema/src/com/cloud/projects/dao/ProjectInvitationDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.projects.ProjectInvitation.State; @@ -34,7 +33,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {ProjectInvitationDao.class}) public class ProjectInvitationDaoImpl extends GenericDaoBase implements ProjectInvitationDao { - private static final Logger s_logger = Logger.getLogger(ProjectInvitationDaoImpl.class); protected final SearchBuilder AllFieldsSearch; protected final SearchBuilder InactiveSearch; @@ -91,7 +89,7 @@ public class ProjectInvitationDaoImpl extends GenericDaoBase implements SnapshotDao { - public static final Logger s_logger = Logger.getLogger(SnapshotDaoImpl.class.getName()); // TODO: we should remove these direct sqls private static final String GET_LAST_SNAPSHOT = "SELECT snapshots.id FROM snapshot_store_ref, snapshots where snapshots.id = snapshot_store_ref.snapshot_id AND snapshosts.volume_id = ? AND snapshot_store_ref.role = ? ORDER BY created DESC"; @@ -208,7 +206,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements return rs.getLong(1); } } catch (Exception ex) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "caught something while getting sec. 
host id: " + ex.getLocalizedMessage()); } return null; @@ -228,7 +226,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements return rs.getLong(1); } } catch (Exception ex) { - s_logger.error("error getting last snapshot", ex); + logger.error("error getting last snapshot", ex); } return 0; } @@ -246,7 +244,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements pstmt.executeUpdate(); return 1; } catch (Exception ex) { - s_logger.error("error getting last snapshot", ex); + logger.error("error getting last snapshot", ex); } return 0; } @@ -263,7 +261,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements pstmt.executeUpdate(); return 1; } catch (Exception ex) { - s_logger.error("error set secondary storage host id", ex); + logger.error("error set secondary storage host id", ex); } return 0; } diff --git a/engine/schema/src/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/StoragePoolHostDaoImpl.java index c1a765737be..df5a45aacc2 100644 --- a/engine/schema/src/com/cloud/storage/dao/StoragePoolHostDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/StoragePoolHostDaoImpl.java @@ -24,7 +24,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.Status; @@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {StoragePoolHostDao.class}) public class StoragePoolHostDaoImpl extends GenericDaoBase implements StoragePoolHostDao { - public static final Logger s_logger = Logger.getLogger(StoragePoolHostDaoImpl.class.getName()); protected final SearchBuilder PoolSearch; protected final SearchBuilder HostSearch; @@ -115,10 +113,10 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase(rs.getLong(1), rs.getInt(2))); } } catch (SQLException e) { - s_logger.debug("SQLException: ", e); + logger.debug("SQLException: ", e); } return l; } diff --git 
a/engine/schema/src/com/cloud/storage/dao/UploadDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/UploadDaoImpl.java index 191fc4ccfb1..42f989a81e7 100644 --- a/engine/schema/src/com/cloud/storage/dao/UploadDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/UploadDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.storage.Upload.Mode; @@ -33,7 +32,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {UploadDao.class}) public class UploadDaoImpl extends GenericDaoBase implements UploadDao { - public static final Logger s_logger = Logger.getLogger(UploadDaoImpl.class.getName()); protected final SearchBuilder typeUploadStatusSearch; protected final SearchBuilder typeHostAndUploadStatusSearch; protected final SearchBuilder typeModeAndStatusSearch; diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java index 02fb4b466ed..0c21c4f6a83 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -30,7 +30,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.dao.DataCenterDao; @@ -65,7 +64,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {VMTemplateDao.class}) public class VMTemplateDaoImpl extends GenericDaoBase implements VMTemplateDao { - private static final Logger s_logger = Logger.getLogger(VMTemplateDaoImpl.class); @Inject VMTemplateZoneDao _templateZoneDao; @@ -234,7 +232,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem l.add(rs.getLong(1)); } } catch 
(SQLException e) { - s_logger.debug("Exception: ", e); + logger.debug("Exception: ", e); } return l; } @@ -288,7 +286,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem routerTmpltName = (String)params.get("routing.uniquename"); - s_logger.debug("Found parameter routing unique name " + routerTmpltName); + logger.debug("Found parameter routing unique name " + routerTmpltName); if (routerTmpltName == null) { routerTmpltName = "routing"; } @@ -297,8 +295,8 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem if (consoleProxyTmpltName == null) { consoleProxyTmpltName = "routing"; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Use console proxy template : " + consoleProxyTmpltName); + if (logger.isDebugEnabled()) { + logger.debug("Use console proxy template : " + consoleProxyTmpltName); } UniqueNameSearch = createSearchBuilder(); @@ -512,10 +510,10 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem * (rs.next()) { Pair templateZonePair = new Pair(rs.getLong(1), -1L); templateZonePairList.add(templateZonePair); } * - * } catch (Exception e) { s_logger.warn("Error listing templates", e); } + * } catch (Exception e) { logger.warn("Error listing templates", e); } * finally { try { if (rs != null) { rs.close(); } if (pstmt != null) { * pstmt.close(); } txn.commit(); } catch (SQLException sqle) { - * s_logger.warn("Error in cleaning up", sqle); } } + * logger.warn("Error in cleaning up", sqle); } } * * return templateZonePairList; } * @@ -696,9 +694,9 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem * null)); continue; } else if (keyword == null && name == null){ * templateZonePairList.add(new Pair(publicIsos.get(i).getId(), * null)); } } } } } catch (Exception e) { - * s_logger.warn("Error listing templates", e); } finally { try { if (rs != + * logger.warn("Error listing templates", e); } finally { try { if (rs != * null) { rs.close(); } if (pstmt != null) { pstmt.close(); } txn.commit(); - * } catch( SQLException 
sqle) { s_logger.warn("Error in cleaning up", + * } catch( SQLException sqle) { logger.warn("Error in cleaning up", * sqle); } } * * return templateZonePairList; } @@ -1021,7 +1019,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem * while (rs.next()) { final Pair templateZonePair = new * Pair( rs.getLong(1), -1L); * templateZonePairList.add(templateZonePair); } txn.commit(); } catch - * (Exception e) { s_logger.warn("Error listing S3 templates", e); if (txn + * (Exception e) { logger.warn("Error listing S3 templates", e); if (txn * != null) { txn.rollback(); } } finally { closeResources(pstmt, rs); if * (txn != null) { txn.close(); } } * @@ -1050,7 +1048,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem builder.set(vo, "updated", new Date()); int rows = update((VMTemplateVO)vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VMTemplateVO dbTemplate = findByIdIncludingRemoved(vo.getId()); if (dbTemplate != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -1083,7 +1081,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update template: id=" + vo.getId() + ", as no such template exists in the database anymore"); + logger.debug("Unable to update template: id=" + vo.getId() + ", as no such template exists in the database anymore"); } } return rows > 0; diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java index a176881c742..862ca4386c2 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java @@ -29,7 +29,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import 
org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -53,7 +52,6 @@ import com.cloud.utils.db.UpdateBuilder; @Component @Local(value = {VMTemplateHostDao.class}) public class VMTemplateHostDaoImpl extends GenericDaoBase implements VMTemplateHostDao { - public static final Logger s_logger = Logger.getLogger(VMTemplateHostDaoImpl.class.getName()); @Inject HostDao _hostDao; protected final SearchBuilder HostSearch; @@ -172,7 +170,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase 0; diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java index aacd4ffc36a..8a4f8f13ffe 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java @@ -24,7 +24,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -44,7 +43,6 @@ import com.cloud.utils.db.UpdateBuilder; @Component @Local(value = {VMTemplatePoolDao.class}) public class VMTemplatePoolDaoImpl extends GenericDaoBase implements VMTemplatePoolDao { - public static final Logger s_logger = Logger.getLogger(VMTemplatePoolDaoImpl.class.getName()); protected final SearchBuilder PoolSearch; protected final SearchBuilder TemplateSearch; @@ -160,7 +158,7 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase 0; diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java index 8cadf617f0b..ebd0cf5222c 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import 
org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.storage.VMTemplateZoneVO; @@ -32,7 +31,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {VMTemplateZoneDao.class}) public class VMTemplateZoneDaoImpl extends GenericDaoBase implements VMTemplateZoneDao { - public static final Logger s_logger = Logger.getLogger(VMTemplateZoneDaoImpl.class.getName()); protected final SearchBuilder ZoneSearch; protected final SearchBuilder TemplateSearch; diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java index 85d08b854c0..e260770be27 100644 --- a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -26,7 +26,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.exception.InvalidParameterValueException; @@ -55,7 +54,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = VolumeDao.class) public class VolumeDaoImpl extends GenericDaoBase implements VolumeDao { - private static final Logger s_logger = Logger.getLogger(VolumeDaoImpl.class); protected final SearchBuilder DetachedAccountIdSearch; protected final SearchBuilder TemplateZoneSearch; protected final GenericSearchBuilder TotalSizeByPoolSearch; @@ -268,7 +266,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol else if (scope == ScopeType.ZONE) sql = SELECT_HYPERTYPE_FROM_ZONE_VOLUME; else - s_logger.error("Unhandled scope type '" + scope + "' when running getHypervisorType on volume id " + volumeId); + logger.error("Unhandled scope type '" + scope + "' when running getHypervisorType on volume id " + volumeId); pstmt = txn.prepareAutoCloseStatement(sql); pstmt.setLong(1, volumeId); @@ -297,7 +295,7 @@ public class VolumeDaoImpl extends 
GenericDaoBase implements Vol } else if (type.equals(HypervisorType.VMware)) { return ImageFormat.OVA; } else { - s_logger.warn("Do not support hypervisor " + type.toString()); + logger.warn("Do not support hypervisor " + type.toString()); return null; } } @@ -483,7 +481,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol builder.set(vo, "updated", new Date()); int rows = update((VolumeVO)vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VolumeVO dbVol = findByIdIncludingRemoved(vo.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -516,7 +514,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update volume: id=" + vo.getId() + ", as there is no such volume exists in the database anymore"); + logger.debug("Unable to update volume: id=" + vo.getId() + ", as there is no such volume exists in the database anymore"); } } return rows > 0; diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeHostDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VolumeHostDaoImpl.java index 2820061bac1..8922a807e65 100644 --- a/engine/schema/src/com/cloud/storage/dao/VolumeHostDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VolumeHostDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -38,7 +37,6 @@ import com.cloud.utils.db.UpdateBuilder; @Component @Local(value = {VolumeHostDao.class}) public class VolumeHostDaoImpl extends GenericDaoBase implements VolumeHostDao { - private static final Logger s_logger = Logger.getLogger(VolumeHostDaoImpl.class); protected final SearchBuilder HostVolumeSearch; protected final SearchBuilder ZoneVolumeSearch; 
protected final SearchBuilder VolumeSearch; @@ -141,7 +139,7 @@ public class VolumeHostDaoImpl extends GenericDaoBase implem builder.set(vo, "updated", new Date()); int rows = update((VolumeHostVO)vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VolumeHostVO dbVol = findByIdIncludingRemoved(volHost.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -174,7 +172,7 @@ public class VolumeHostDaoImpl extends GenericDaoBase implem .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update objectIndatastore: id=" + volHost.getId() + ", as there is no such object exists in the database anymore"); + logger.debug("Unable to update objectIndatastore: id=" + volHost.getId() + ", as there is no such object exists in the database anymore"); } } return rows > 0; diff --git a/engine/schema/src/com/cloud/upgrade/DatabaseIntegrityChecker.java b/engine/schema/src/com/cloud/upgrade/DatabaseIntegrityChecker.java index 2001fae0529..8a2773599de 100644 --- a/engine/schema/src/com/cloud/upgrade/DatabaseIntegrityChecker.java +++ b/engine/schema/src/com/cloud/upgrade/DatabaseIntegrityChecker.java @@ -24,7 +24,6 @@ import java.sql.SQLException; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.maint.Version; @@ -39,7 +38,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {SystemIntegrityChecker.class}) public class DatabaseIntegrityChecker extends AdapterBase implements SystemIntegrityChecker { - private static final Logger s_logger = Logger.getLogger(DatabaseIntegrityChecker.class); @Inject VersionDao _dao; @@ -103,32 +101,32 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg } catch (Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" 
+ e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e); } } catch (Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e); } } if (noDuplicate) { - s_logger.debug("No duplicate hosts with the same local storage found in database"); + logger.debug("No duplicate hosts with the same local storage found in database"); } else { - s_logger.error(helpInfo.toString()); + logger.error(helpInfo.toString()); } txn.commit(); return noDuplicate; }catch (Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e); } } catch (Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e); } finally @@ -139,7 +137,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg } }catch(Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception:"+ e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception:"+ e.getMessage()); } } } @@ -152,7 +150,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg String tableName = rs.getString(1); if (tableName.equalsIgnoreCase("usage_event") || 
tableName.equalsIgnoreCase("usage_port_forwarding") || tableName.equalsIgnoreCase("usage_network_offering")) { num++; - s_logger.debug("Checking 21to22PremiumUprage table " + tableName + " found"); + logger.debug("Checking 21to22PremiumUprage table " + tableName + " found"); } if (num == 3) { return true; @@ -168,7 +166,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg boolean found = false; while (rs.next()) { if (column.equalsIgnoreCase(rs.getString(1))) { - s_logger.debug(String.format("Column %1$s.%2$s.%3$s found", dbName, tableName, column)); + logger.debug(String.format("Column %1$s.%2$s.%3$s found", dbName, tableName, column)); found = true; break; } @@ -225,33 +223,33 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg } } if (!hasUsage) { - s_logger.debug("No cloud_usage found in database, no need to check missed premium upgrade"); + logger.debug("No cloud_usage found in database, no need to check missed premium upgrade"); txn.commit(); return true; } if (!check21to22PremiumUprage(conn)) { - s_logger.error("21to22 premium upgrade missed"); + logger.error("21to22 premium upgrade missed"); txn.commit(); return false; } if (!check221to222PremiumUprage(conn)) { - s_logger.error("221to222 premium upgrade missed"); + logger.error("221to222 premium upgrade missed"); txn.commit(); return false; } if (!check222to224PremiumUpgrade(conn)) { - s_logger.error("222to224 premium upgrade missed"); + logger.error("222to224 premium upgrade missed"); txn.commit(); return false; } txn.commit(); return true; } catch (Exception e) { - s_logger.error("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage()); + logger.error("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage()); throw new CloudRuntimeException("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage(), e); } }catch (Exception e) { - s_logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage()); + 
logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage()); throw new CloudRuntimeException("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage(),e); } finally @@ -262,7 +260,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg } }catch(Exception e) { - s_logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage()); + logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage()); } } } @@ -271,19 +269,19 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg public void check() { GlobalLock lock = GlobalLock.getInternLock("DatabaseIntegrity"); try { - s_logger.info("Grabbing lock to check for database integrity."); + logger.info("Grabbing lock to check for database integrity."); if (!lock.lock(20 * 60)) { throw new CloudRuntimeException("Unable to acquire lock to check for database integrity."); } try { - s_logger.info("Performing database integrity check"); + logger.info("Performing database integrity check"); if (!checkDuplicateHostWithTheSameLocalStorage()) { throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage detected error"); } if (!checkMissedPremiumUpgradeFor228()) { - s_logger.error("Your current database version is 2.2.8, management server detected some missed premium upgrade, please contact CloudStack support and attach log file. Thank you!"); + logger.error("Your current database version is 2.2.8, management server detected some missed premium upgrade, please contact CloudStack support and attach log file. 
Thank you!"); throw new CloudRuntimeException("Detected missed premium upgrade"); } } finally { @@ -299,7 +297,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg try { check(); } catch (Exception e) { - s_logger.error("System integrity check exception", e); + logger.error("System integrity check exception", e); System.exit(1); } return true; diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java index 0fb2dfe5a90..fb462c2565c 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java +++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java @@ -24,7 +24,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.upgrade.dao.VersionVO.Step; @@ -42,7 +41,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Local(value = VersionDao.class) @DB() public class VersionDaoImpl extends GenericDaoBase implements VersionDao { - private static final Logger s_logger = Logger.getLogger(VersionDaoImpl.class); final GenericSearchBuilder CurrentVersionSearch; final SearchBuilder AllFieldsSearch; @@ -76,7 +74,7 @@ public class VersionDaoImpl extends GenericDaoBase implements V @DB public String getCurrentVersion() { try (Connection conn = TransactionLegacy.getStandaloneConnection();) { - s_logger.debug("Checking to see if the database is at a version before it was the version table is created"); + logger.debug("Checking to see if the database is at a version before it was the version table is created"); try ( PreparedStatement pstmt = conn.prepareStatement("SHOW TABLES LIKE 'version'"); @@ -91,8 +89,8 @@ public class VersionDaoImpl extends GenericDaoBase implements V pstmt_domain.executeQuery(); return "2.1.8"; } catch (final SQLException e) { - s_logger.debug("Assuming the exception means domain_id is not there."); - s_logger.debug("No version table 
and no nics table, returning 2.1.7"); + logger.debug("Assuming the exception means domain_id is not there."); + logger.debug("No version table and no nics table, returning 2.1.7"); return "2.1.7"; } } else { @@ -100,7 +98,7 @@ public class VersionDaoImpl extends GenericDaoBase implements V ResultSet rs_static_nat = pstmt_static_nat.executeQuery();){ return "2.2.1"; } catch (final SQLException e) { - s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2"); + logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2"); return "2.2.2"; } } @@ -127,7 +125,7 @@ public class VersionDaoImpl extends GenericDaoBase implements V } // Use nics table information and is_static_nat field from firewall_rules table to determine version information - s_logger.debug("Version table exists, but it's empty; have to confirm that version is 2.2.2"); + logger.debug("Version table exists, but it's empty; have to confirm that version is 2.2.2"); try (PreparedStatement pstmt = conn.prepareStatement("SHOW TABLES LIKE 'nics'"); ResultSet rs = pstmt.executeQuery();){ if (!rs.next()) { @@ -138,7 +136,7 @@ public class VersionDaoImpl extends GenericDaoBase implements V throw new CloudRuntimeException("Unable to determine the current version, version table exists and empty, " + "nics table doesn't exist, is_static_nat field exists in firewall_rules table"); } catch (final SQLException e) { - s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2"); + logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2"); return "2.2.2"; } } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java index 93ddf9bf27a..70adff4c0fb 100644 --- 
a/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java @@ -27,7 +27,6 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import javax.ejb.Local; @@ -42,7 +41,6 @@ import java.util.TimeZone; @Component @Local(value = {UsageDao.class}) public class UsageDaoImpl extends GenericDaoBase implements UsageDao { - public static final Logger s_logger = Logger.getLogger(UsageDaoImpl.class.getName()); private static final String DELETE_ALL = "DELETE FROM cloud_usage"; private static final String DELETE_ALL_BY_ACCOUNTID = "DELETE FROM cloud_usage WHERE account_id = ?"; private static final String DELETE_ALL_BY_INTERVAL = "DELETE FROM cloud_usage WHERE end_date < DATE_SUB(CURRENT_DATE(), INTERVAL ? DAY)"; @@ -92,7 +90,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error retrieving usage vm instances for account id: " + accountId); + logger.error("error retrieving usage vm instances for account id: " + accountId); } finally { txn.close(); } @@ -132,7 +130,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving account to cloud_usage db", ex); + logger.error("error saving account to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -162,7 +160,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving account to cloud_usage db", ex); + logger.error("error saving account to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -203,7 +201,7 @@ public class 
UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving user stats to cloud_usage db", ex); + logger.error("error saving user stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -230,7 +228,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving user stats to cloud_usage db", ex); + logger.error("error saving user stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -247,7 +245,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage return Long.valueOf(rs.getLong(1)); } } catch (Exception ex) { - s_logger.error("error getting last account id", ex); + logger.error("error getting last account id", ex); } return null; } @@ -264,7 +262,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage return Long.valueOf(rs.getLong(1)); } } catch (Exception ex) { - s_logger.error("error getting last user stats id", ex); + logger.error("error getting last user stats id", ex); } return null; } @@ -283,7 +281,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage templateList.add(Long.valueOf(rs.getLong(1))); } } catch (Exception ex) { - s_logger.error("error listing public templates", ex); + logger.error("error listing public templates", ex); } return templateList; } @@ -300,7 +298,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage return Long.valueOf(rs.getLong(1)); } } catch (Exception ex) { - s_logger.error("error getting last vm disk stats id", ex); + logger.error("error getting last vm disk stats id", ex); } return null; } @@ -333,7 +331,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving vm disk stats to cloud_usage db", ex); + logger.error("error saving vm 
disk stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } @@ -379,7 +377,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving vm disk stats to cloud_usage db", ex); + logger.error("error saving vm disk stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } @@ -446,7 +444,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving usage records to cloud_usage db", ex); + logger.error("error saving usage records to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -464,7 +462,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error removing old cloud_usage records for interval: " + days); + logger.error("error removing old cloud_usage records for interval: " + days); } finally { txn.close(); } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageIPAddressDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageIPAddressDaoImpl.java index 358e638b547..f3ea70128db 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageIPAddressDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageIPAddressDaoImpl.java @@ -27,7 +27,6 @@ import java.util.TimeZone; import javax.ejb.Local; import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageIPAddressVO; @@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {UsageIPAddressDao.class}) public class UsageIPAddressDaoImpl extends GenericDaoBase implements UsageIPAddressDao { - public static final Logger s_logger = Logger.getLogger(UsageIPAddressDaoImpl.class.getName()); protected static final String UPDATE_RELEASED = "UPDATE 
usage_ip_address SET released = ? WHERE account_id = ? AND public_ip_address = ? and released IS NULL"; protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = @@ -77,7 +75,7 @@ public class UsageIPAddressDaoImpl extends GenericDaoBase implements UsageJobDao { - private static final Logger s_logger = Logger.getLogger(UsageJobDaoImpl.class.getName()); private static final String GET_LAST_JOB_SUCCESS_DATE_MILLIS = "SELECT end_millis FROM cloud_usage.usage_job WHERE end_millis > 0 and success = 1 ORDER BY end_millis DESC LIMIT 1"; @@ -53,7 +51,7 @@ public class UsageJobDaoImpl extends GenericDaoBase implements return rs.getLong(1); } } catch (Exception ex) { - s_logger.error("error getting last usage job success date", ex); + logger.error("error getting last usage job success date", ex); } finally { txn.close(); } @@ -79,7 +77,7 @@ public class UsageJobDaoImpl extends GenericDaoBase implements txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error updating job success date", ex); + logger.error("error updating job success date", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); diff --git a/engine/schema/src/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java index abace041865..20ee05b4566 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java @@ -27,7 +27,6 @@ import java.util.TimeZone; import javax.ejb.Local; import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageLoadBalancerPolicyVO; @@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {UsageLoadBalancerPolicyDao.class}) public class UsageLoadBalancerPolicyDaoImpl extends GenericDaoBase implements UsageLoadBalancerPolicyDao { - public 
static final Logger s_logger = Logger.getLogger(UsageLoadBalancerPolicyDaoImpl.class.getName()); protected static final String REMOVE_BY_USERID_LBID = "DELETE FROM usage_load_balancer_policy WHERE account_id = ? AND id = ?"; protected static final String UPDATE_DELETED = "UPDATE usage_load_balancer_policy SET deleted = ? WHERE account_id = ? AND id = ? and deleted IS NULL"; @@ -66,7 +64,7 @@ public class UsageLoadBalancerPolicyDaoImpl extends GenericDaoBase implements UsageNetworkDao { - private static final Logger s_logger = Logger.getLogger(UsageVMInstanceDaoImpl.class.getName()); private static final String SELECT_LATEST_STATS = "SELECT u.account_id, u.zone_id, u.host_id, u.host_type, u.network_id, u.bytes_sent, u.bytes_received, u.agg_bytes_received, u.agg_bytes_sent, u.event_time_millis " + "FROM cloud_usage.usage_network u INNER JOIN (SELECT netusage.account_id as acct_id, netusage.zone_id as z_id, max(netusage.event_time_millis) as max_date " @@ -79,7 +77,7 @@ public class UsageNetworkDaoImpl extends GenericDaoBase im } return returnMap; } catch (Exception ex) { - s_logger.error("error getting recent usage network stats", ex); + logger.error("error getting recent usage network stats", ex); } finally { txn.close(); } @@ -99,7 +97,7 @@ public class UsageNetworkDaoImpl extends GenericDaoBase im txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error deleting old usage network stats", ex); + logger.error("error deleting old usage network stats", ex); } } @@ -128,7 +126,7 @@ public class UsageNetworkDaoImpl extends GenericDaoBase im txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving usage_network to cloud_usage db", ex); + logger.error("error saving usage_network to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java index 
2661d4e0ec4..4905372d340 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java @@ -27,7 +27,6 @@ import java.util.TimeZone; import javax.ejb.Local; import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageNetworkOfferingVO; @@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {UsageNetworkOfferingDao.class}) public class UsageNetworkOfferingDaoImpl extends GenericDaoBase implements UsageNetworkOfferingDao { - public static final Logger s_logger = Logger.getLogger(UsageNetworkOfferingDaoImpl.class.getName()); protected static final String UPDATE_DELETED = "UPDATE usage_network_offering SET deleted = ? WHERE account_id = ? AND vm_instance_id = ? AND network_offering_id = ? and deleted IS NULL"; @@ -76,7 +74,7 @@ public class UsageNetworkOfferingDaoImpl extends GenericDaoBase implements UsagePortForwardingRuleDao { - public static final Logger s_logger = Logger.getLogger(UsagePortForwardingRuleDaoImpl.class.getName()); protected static final String REMOVE_BY_USERID_PFID = "DELETE FROM usage_port_forwarding WHERE account_id = ? AND id = ?"; protected static final String UPDATE_DELETED = "UPDATE usage_port_forwarding SET deleted = ? WHERE account_id = ? AND id = ? and deleted IS NULL"; @@ -66,7 +64,7 @@ public class UsagePortForwardingRuleDaoImpl extends GenericDaoBase implements UsageSecurityGroupDao { - public static final Logger s_logger = Logger.getLogger(UsageSecurityGroupDaoImpl.class.getName()); protected static final String UPDATE_DELETED = "UPDATE usage_security_group SET deleted = ? WHERE account_id = ? AND vm_instance_id = ? AND security_group_id = ? 
and deleted IS NULL"; @@ -76,7 +74,7 @@ public class UsageSecurityGroupDaoImpl extends GenericDaoBase implements UsageStorageDao { - public static final Logger s_logger = Logger.getLogger(UsageStorageDaoImpl.class.getName()); protected static final String REMOVE_BY_USERID_STORAGEID = "DELETE FROM usage_storage WHERE account_id = ? AND id = ? AND storage_type = ?"; protected static final String UPDATE_DELETED = "UPDATE usage_storage SET deleted = ? WHERE account_id = ? AND id = ? AND storage_type = ? and deleted IS NULL"; @@ -108,7 +106,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase im txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.error("Error removing usageStorageVO", e); + logger.error("Error removing usageStorageVO", e); } finally { txn.close(); } @@ -136,7 +134,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase im txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.error("Error updating UsageStorageVO:"+e.getMessage(), e); + logger.error("Error updating UsageStorageVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -210,7 +208,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase im } }catch (Exception e) { txn.rollback(); - s_logger.error("getUsageRecords:Exception:"+e.getMessage(), e); + logger.error("getUsageRecords:Exception:"+e.getMessage(), e); } finally { txn.close(); } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java index 930ad899992..4817b7d1dfc 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java @@ -25,7 +25,6 @@ import java.util.TimeZone; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVMInstanceVO; @@ -36,7 +35,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = 
{UsageVMInstanceDao.class}) public class UsageVMInstanceDaoImpl extends GenericDaoBase implements UsageVMInstanceDao { - public static final Logger s_logger = Logger.getLogger(UsageVMInstanceDaoImpl.class.getName()); protected static final String UPDATE_USAGE_INSTANCE_SQL = "UPDATE usage_vm_instance SET end_date = ? " + "WHERE account_id = ? and vm_instance_id = ? and usage_type = ? and end_date IS NULL"; @@ -64,7 +62,7 @@ public class UsageVMInstanceDaoImpl extends GenericDaoBase implements UsageVMSnapshotDao { - public static final Logger s_logger = Logger.getLogger(UsageVMSnapshotDaoImpl.class.getName()); protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT id, zone_id, account_id, domain_id, vm_id, disk_offering_id, size, created, processed " + " FROM usage_vmsnapshot" + " WHERE account_id = ? " + " AND ( (created BETWEEN ? AND ?) OR " + " (created < ? AND processed is NULL) ) ORDER BY created asc"; @@ -62,7 +60,7 @@ public class UsageVMSnapshotDaoImpl extends GenericDaoBase implements UsageVPNUserDao { - public static final Logger s_logger = Logger.getLogger(UsageVPNUserDaoImpl.class.getName()); protected static final String UPDATE_DELETED = "UPDATE usage_vpn_user SET deleted = ? WHERE account_id = ? AND user_id = ? 
and deleted IS NULL"; protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT zone_id, account_id, domain_id, user_id, user_name, created, deleted " + "FROM usage_vpn_user " @@ -71,7 +69,7 @@ public class UsageVPNUserDaoImpl extends GenericDaoBase im txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.error("Error updating UsageVPNUserVO:"+e.getMessage(), e); + logger.error("Error updating UsageVPNUserVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -141,7 +139,7 @@ public class UsageVPNUserDaoImpl extends GenericDaoBase im } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java index 4491c6730b9..7daaf0fbb3f 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java @@ -24,7 +24,6 @@ import java.util.Map; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVmDiskVO; @@ -35,7 +34,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {UsageVmDiskDao.class}) public class UsageVmDiskDaoImpl extends GenericDaoBase implements UsageVmDiskDao { - private static final Logger s_logger = Logger.getLogger(UsageVmDiskDaoImpl.class.getName()); private static final String SELECT_LATEST_STATS = "SELECT uvd.account_id, uvd.zone_id, uvd.vm_id, uvd.volume_id, uvd.io_read, uvd.io_write, uvd.agg_io_read, uvd.agg_io_write, " + "uvd.bytes_read, uvd.bytes_write, uvd.agg_bytes_read, uvd.agg_bytes_write, uvd.event_time_millis " @@ -83,7 +81,7 @@ public class UsageVmDiskDaoImpl extends GenericDaoBase impl } return returnMap; } catch (Exception ex) { - s_logger.error("error getting recent usage disk stats", ex); + 
logger.error("error getting recent usage disk stats", ex); } finally { txn.close(); } @@ -103,7 +101,7 @@ public class UsageVmDiskDaoImpl extends GenericDaoBase impl txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error deleting old usage disk stats", ex); + logger.error("error deleting old usage disk stats", ex); } } @@ -135,7 +133,7 @@ public class UsageVmDiskDaoImpl extends GenericDaoBase impl txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving usage_vm_disk to cloud_usage db", ex); + logger.error("error saving usage_vm_disk to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVolumeDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVolumeDaoImpl.java index 7ef4222f928..96f1e489a6e 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageVolumeDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageVolumeDaoImpl.java @@ -27,7 +27,6 @@ import java.util.TimeZone; import javax.ejb.Local; import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVolumeVO; @@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {UsageVolumeDao.class}) public class UsageVolumeDaoImpl extends GenericDaoBase implements UsageVolumeDao { - public static final Logger s_logger = Logger.getLogger(UsageVolumeDaoImpl.class.getName()); protected static final String REMOVE_BY_USERID_VOLID = "DELETE FROM usage_volume WHERE account_id = ? AND id = ?"; protected static final String UPDATE_DELETED = "UPDATE usage_volume SET deleted = ? WHERE account_id = ? AND id = ? 
and deleted IS NULL"; @@ -71,7 +69,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase impl txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error removing usageVolumeVO:"+e.getMessage(), e); + logger.warn("Error removing usageVolumeVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -93,7 +91,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase impl txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error updating UsageVolumeVO", e); + logger.warn("Error updating UsageVolumeVO", e); } finally { txn.close(); } @@ -171,7 +169,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase impl } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/com/cloud/user/dao/AccountDaoImpl.java b/engine/schema/src/com/cloud/user/dao/AccountDaoImpl.java index 9ae279fe5f0..e99cfc04b84 100644 --- a/engine/schema/src/com/cloud/user/dao/AccountDaoImpl.java +++ b/engine/schema/src/com/cloud/user/dao/AccountDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.user.Account; @@ -44,7 +43,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {AccountDao.class}) public class AccountDaoImpl extends GenericDaoBase implements AccountDao { - private static final Logger s_logger = Logger.getLogger(AccountDaoImpl.class); private static final String FIND_USER_ACCOUNT_BY_API_KEY = "SELECT u.id, u.username, u.account_id, u.secret_key, u.state, " + "a.id, a.account_name, a.type, a.domain_id, a.state " + "FROM `cloud`.`user` u, `cloud`.`account` a " + "WHERE u.account_id = a.id AND u.api_key = ? 
and u.removed IS NULL"; @@ -148,7 +146,7 @@ public class AccountDaoImpl extends GenericDaoBase implements A userAcctPair = new Pair(u, a); } } catch (Exception e) { - s_logger.warn("Exception finding user/acct by api key: " + apiKey, e); + logger.warn("Exception finding user/acct by api key: " + apiKey, e); } return userAcctPair; } @@ -266,7 +264,7 @@ public class AccountDaoImpl extends GenericDaoBase implements A if (!account.getNeedsCleanup()) { account.setNeedsCleanup(true); if (!update(accountId, account)) { - s_logger.warn("Failed to mark account id=" + accountId + " for cleanup"); + logger.warn("Failed to mark account id=" + accountId + " for cleanup"); } } } @@ -286,7 +284,7 @@ public class AccountDaoImpl extends GenericDaoBase implements A domain_id = account_vo.getDomainId(); } catch (Exception e) { - s_logger.warn("getDomainIdForGivenAccountId: Exception :" + e.getMessage()); + logger.warn("getDomainIdForGivenAccountId: Exception :" + e.getMessage()); } finally { return domain_id; diff --git a/engine/schema/src/com/cloud/user/dao/UserStatisticsDaoImpl.java b/engine/schema/src/com/cloud/user/dao/UserStatisticsDaoImpl.java index 7a8e714dc77..ceefb58d2f6 100644 --- a/engine/schema/src/com/cloud/user/dao/UserStatisticsDaoImpl.java +++ b/engine/schema/src/com/cloud/user/dao/UserStatisticsDaoImpl.java @@ -25,7 +25,6 @@ import java.util.TimeZone; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.user.UserStatisticsVO; @@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {UserStatisticsDao.class}) public class UserStatisticsDaoImpl extends GenericDaoBase implements UserStatisticsDao { - private static final Logger s_logger = Logger.getLogger(UserStatisticsDaoImpl.class); private static final String ACTIVE_AND_RECENTLY_DELETED_SEARCH = "SELECT us.id, us.data_center_id, us.account_id, us.public_ip_address, us.device_id, us.device_type, us.network_id, 
us.agg_bytes_received, us.agg_bytes_sent " + "FROM user_statistics us, account a " + "WHERE us.account_id = a.id AND (a.removed IS NULL OR a.removed >= ?) " + "ORDER BY us.id"; @@ -111,7 +109,7 @@ public class UserStatisticsDaoImpl extends GenericDaoBase implements VmDiskStatisticsDao { - private static final Logger s_logger = Logger.getLogger(VmDiskStatisticsDaoImpl.class); private static final String ACTIVE_AND_RECENTLY_DELETED_SEARCH = "SELECT bcf.id, bcf.data_center_id, bcf.account_id, bcf.vm_id, bcf.volume_id, bcf.agg_io_read, bcf.agg_io_write, bcf.agg_bytes_read, bcf.agg_bytes_write " + "FROM vm_disk_statistics bcf, account a " + "WHERE bcf.account_id = a.id AND (a.removed IS NULL OR a.removed >= ?) " + "ORDER BY bcf.id"; @@ -106,7 +104,7 @@ public class VmDiskStatisticsDaoImpl extends GenericDaoBase implements ConsoleProxyDao { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyDaoImpl.class); // // query SQL for returnning console proxy assignment info as following @@ -217,7 +215,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im l.add(new Pair(rs.getLong(1), rs.getInt(2))); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return l; } @@ -242,7 +240,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im l.add(new Pair(rs.getLong(1), rs.getInt(2))); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return l; } @@ -261,7 +259,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im return rs.getInt(1); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return 0; } @@ -279,7 +277,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im return rs.getInt(1); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return 0; } @@ -301,7 
+299,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im l.add(info); } } catch (SQLException e) { - s_logger.debug("Exception: ", e); + logger.debug("Exception: ", e); } return l; } @@ -323,7 +321,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im l.add(rs.getLong(1)); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return l; } diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java index 3007810d454..d6da4fb5c24 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.annotation.PostConstruct; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -35,7 +34,6 @@ import com.cloud.vm.UserVmCloneSettingVO; @Local(value = {UserVmCloneSettingDao.class}) @DB() public class UserVmCloneSettingDaoImpl extends GenericDaoBase implements UserVmCloneSettingDao { - public static final Logger s_logger = Logger.getLogger(UserVmCloneSettingDaoImpl.class); protected SearchBuilder vmIdSearch; protected SearchBuilder cloneTypeSearch; diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java index 1f2843d144f..c9099cdf361 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java @@ -30,7 +30,6 @@ import javax.annotation.PostConstruct; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; @@ -55,7 +54,6 @@ import com.cloud.vm.dao.UserVmData.SecurityGroupData; @Local(value = {UserVmDao.class}) public class 
UserVmDaoImpl extends GenericDaoBase implements UserVmDao { - public static final Logger s_logger = Logger.getLogger(UserVmDaoImpl.class); protected SearchBuilder AccountPodSearch; protected SearchBuilder AccountDataCenterSearch; @@ -378,13 +376,13 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } } catch (Exception e) { - s_logger.error("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage()); + logger.error("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage()); throw new CloudRuntimeException("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage(), e); } txn.commit(); return result; } catch (Exception e) { - s_logger.error("listPodIdsHavingVmsforAccount:Exception : " + e.getMessage()); + logger.error("listPodIdsHavingVmsforAccount:Exception : " + e.getMessage()); throw new CloudRuntimeException("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage(), e); } finally { @@ -396,7 +394,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); } } @@ -433,7 +431,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e); } curr_index += VM_DETAILS_BATCH_SIZE; @@ -441,7 +439,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e); } } @@ -469,20 +467,20 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } catch (Exception e) { - s_logger.error("listVmDetails: Exception:" + 
e.getMessage()); + logger.error("listVmDetails: Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e); } } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e); } } txn.commit(); return userVmDataHash; } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails:Exception : ", e); } finally { @@ -494,7 +492,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); } } @@ -656,7 +654,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } } } catch (SQLException e) { - s_logger.error("GetVmsDetailsByNames: Exception in sql: " + e.getMessage()); + logger.error("GetVmsDetailsByNames: Exception in sql: " + e.getMessage()); throw new CloudRuntimeException("GetVmsDetailsByNames: Exception: " + e.getMessage()); } diff --git a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index be0e3668e7a..dd5f3ab50eb 100644 --- a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -29,7 +29,6 @@ import javax.annotation.PostConstruct; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.HostVO; @@ -66,7 +65,6 @@ import com.cloud.vm.VirtualMachine.Type; @Local(value = {VMInstanceDao.class}) public class VMInstanceDaoImpl extends GenericDaoBase implements VMInstanceDao { - public static final Logger s_logger = 
Logger.getLogger(VMInstanceDaoImpl.class); private static final int MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT = 3; protected SearchBuilder VMClusterSearch; @@ -439,8 +437,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Override public boolean updateState(State oldState, Event event, State newState, VirtualMachine vm, Object opaque) { if (newState == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("There's no way to transition from old state: " + oldState.toString() + " event: " + event.toString()); + if (logger.isDebugEnabled()) { + logger.debug("There's no way to transition from old state: " + oldState.toString() + " event: " + event.toString()); } return false; } @@ -479,7 +477,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem if (result == 0) { VMInstanceVO vo = findByIdIncludingRemoved(vm.getId()); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (vo != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); str.append(": DB Data={Host=").append(vo.getHostId()).append("; State=").append(vo.getState().toString()).append("; updated=").append(vo.getUpdated()) @@ -488,16 +486,16 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem .append("; time=").append(vo.getUpdateTime()); str.append("} Stale Data: {Host=").append(oldHostId).append("; State=").append(oldState).append("; updated=").append(oldUpdated).append("; time=") .append(oldUpdateDate).append("}"); - s_logger.debug(str.toString()); + logger.debug(str.toString()); } else { - s_logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed"); + logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed"); } } if (vo != null && vo.getState() == newState) { // allow for concurrent update if target state has already been matched - s_logger.debug("VM " + vo.getInstanceName() + " state has been already been 
updated to " + newState); + logger.debug("VM " + vo.getInstanceName() + " state has been already been updated to " + newState); return true; } } diff --git a/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java b/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java index a87d284dc12..2ea46f211be 100644 --- a/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -38,7 +37,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO; @Component @Local(value = {VMSnapshotDao.class}) public class VMSnapshotDaoImpl extends GenericDaoBase implements VMSnapshotDao { - private static final Logger s_logger = Logger.getLogger(VMSnapshotDaoImpl.class); private final SearchBuilder SnapshotSearch; private final SearchBuilder ExpungingSnapshotSearch; private final SearchBuilder SnapshotStatusSearch; @@ -145,7 +143,7 @@ public class VMSnapshotDaoImpl extends GenericDaoBase implem builder.set(vo, "updated", new Date()); int rows = update((VMSnapshotVO)vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VMSnapshotVO dbVol = findByIdIncludingRemoved(vo.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -178,7 +176,7 @@ public class VMSnapshotDaoImpl extends GenericDaoBase implem .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore"); + logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore"); } } return rows > 0; diff --git 
a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java index 6df243b7122..d681c305336 100644 --- a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java @@ -23,7 +23,6 @@ import javax.annotation.PostConstruct; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.cloud.entity.api.db.VMEntityVO; @@ -39,7 +38,6 @@ import com.cloud.utils.db.TransactionLegacy; @Local(value = {VMEntityDao.class}) public class VMEntityDaoImpl extends GenericDaoBase implements VMEntityDao { - public static final Logger s_logger = Logger.getLogger(VMEntityDaoImpl.class); @Inject protected VMReservationDao _vmReservationDao; diff --git a/engine/schema/src/org/apache/cloudstack/region/dao/RegionDaoImpl.java b/engine/schema/src/org/apache/cloudstack/region/dao/RegionDaoImpl.java index d7f102d6059..f9ff6bffd7f 100644 --- a/engine/schema/src/org/apache/cloudstack/region/dao/RegionDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/region/dao/RegionDaoImpl.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.region.dao; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.region.RegionVO; @@ -30,7 +29,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {RegionDao.class}) public class RegionDaoImpl extends GenericDaoBase implements RegionDao { - private static final Logger s_logger = Logger.getLogger(RegionDaoImpl.class); protected SearchBuilder NameSearch; protected SearchBuilder AllFieldsSearch; diff --git 
a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java index b34b697af04..91c886cb8d3 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java @@ -26,7 +26,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -60,7 +59,6 @@ import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; public class DirectAgentManagerSimpleImpl extends ManagerBase implements AgentManager { - private static final Logger logger = Logger.getLogger(DirectAgentManagerSimpleImpl.class); private final Map hostResourcesMap = new HashMap(); @Inject HostDao hostDao; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java index 13fd54cf8f7..c201cf601e5 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java @@ -24,7 +24,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotOptions; @@ -71,7 +70,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; public class DefaultVMSnapshotStrategy extends ManagerBase 
implements VMSnapshotStrategy { - private static final Logger s_logger = Logger.getLogger(DefaultVMSnapshotStrategy.class); @Inject VMSnapshotHelper vmSnapshotHelper; @Inject @@ -148,7 +146,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot answer = (CreateVMSnapshotAnswer)agentMgr.send(hostId, ccmd); if (answer != null && answer.getResult()) { processAnswer(vmSnapshotVO, userVm, answer, hostId); - s_logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); + logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); result = true; for (VolumeObjectTO volumeTo : answer.getVolumeTOs()) { @@ -159,21 +157,21 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot String errMsg = "Creating VM snapshot: " + vmSnapshot.getName() + " failed"; if (answer != null && answer.getDetails() != null) errMsg = errMsg + " due to " + answer.getDetails(); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } catch (OperationTimedoutException e) { - s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); + logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); throw new CloudRuntimeException("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); } catch (AgentUnavailableException e) { - s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed", e); + logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed", e); throw new CloudRuntimeException("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); } finally { if (!result) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - s_logger.error("Cannot set vm snapshot state due to: " + 
e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } @@ -186,7 +184,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); } catch (NoTransitionException e) { - s_logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); + logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); } @@ -214,7 +212,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot return true; } else { String errMsg = (answer == null) ? null : answer.getDetails(); - s_logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg); + logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg); throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg); } } catch (OperationTimedoutException e) { @@ -247,7 +245,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot }); } catch (Exception e) { String errMsg = "Error while process answer: " + as.getClass() + " due to " + e.getMessage(); - s_logger.error(errMsg, e); + logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } } @@ -367,21 +365,21 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " failed"; if (answer != null && answer.getDetails() != null) errMsg = errMsg + " due to " + answer.getDetails(); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } 
catch (OperationTimedoutException e) { - s_logger.debug("Failed to revert vm snapshot", e); + logger.debug("Failed to revert vm snapshot", e); throw new CloudRuntimeException(e.getMessage()); } catch (AgentUnavailableException e) { - s_logger.debug("Failed to revert vm snapshot", e); + logger.debug("Failed to revert vm snapshot", e); throw new CloudRuntimeException(e.getMessage()); } finally { if (!result) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 73a8544f51a..c446d1660db 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -28,7 +28,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import com.cloud.storage.Storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; @@ -54,7 +53,6 @@ import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachineProfile; public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class); @Inject StorageManager storageMgr; protected @Inject @@ -116,8 +114,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement } List poolIdsByCapacity = _capacityDao.orderHostsByFreeCapacity(clusterId, capacityType); - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("List of pools in descending order of free capacity: "+ poolIdsByCapacity); + if (logger.isDebugEnabled()) { + logger.debug("List of pools in descending order of free capacity: "+ poolIdsByCapacity); } //now filter the given list of Pools by this ordered list @@ -146,8 +144,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement Long clusterId = plan.getClusterId(); List poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount); + if (logger.isDebugEnabled()) { + logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount); } // now filter the given list of Pools by this ordered list @@ -189,12 +187,12 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName() + " ,poolId: " + pool.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Checking if storage pool is suitable, name: " + pool.getName() + " ,poolId: " + pool.getId()); } if (avoid.shouldAvoid(pool)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("StoragePool is in avoid set, skipping this pool"); + if (logger.isDebugEnabled()) { + logger.debug("StoragePool is in avoid set, skipping this pool"); } return false; } @@ -203,14 +201,14 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement if (clusterId != null) { ClusterVO cluster = _clusterDao.findById(clusterId); if (!(cluster.getHypervisorType() == dskCh.getHypervisorType())) { - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool"); + if (logger.isDebugEnabled()) { + logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool"); } return false; } } else if (pool.getHypervisor() != null && !pool.getHypervisor().equals(HypervisorType.Any) && !(pool.getHypervisor() == dskCh.getHypervisorType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("StoragePool does not have required hypervisorType, skipping this pool"); + if (logger.isDebugEnabled()) { + logger.debug("StoragePool does not have required hypervisorType, skipping this pool"); } return false; } @@ -235,13 +233,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement //LXC ROOT disks supports NFS and local storage pools only if(!(Storage.StoragePoolType.NetworkFilesystem.equals(poolType) || Storage.StoragePoolType.Filesystem.equals(poolType)) ){ - s_logger.debug("StoragePool does not support LXC ROOT disk, skipping this pool"); + logger.debug("StoragePool does not support LXC ROOT disk, skipping this pool"); return false; } } else if (Volume.Type.DATADISK.equals(volType)){ //LXC DATA disks supports RBD storage pool only if(!Storage.StoragePoolType.RBD.equals(poolType)){ - s_logger.debug("StoragePool does not support LXC DATA disk, skipping this pool"); + logger.debug("StoragePool does not support LXC DATA disk, skipping this pool"); return false; } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java index 761c3a8eabc..d52971088a0 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java @@ -23,7 +23,6 @@ import 
javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -38,7 +37,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = StoragePoolAllocator.class) public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(GarbageCollectingStoragePoolAllocator.class); StoragePoolAllocator _firstFitStoragePoolAllocator; StoragePoolAllocator _localStoragePoolAllocator; @@ -50,9 +48,9 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl @Override public List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - s_logger.debug("GarbageCollectingStoragePoolAllocator looking for storage pool"); + logger.debug("GarbageCollectingStoragePoolAllocator looking for storage pool"); if (!_storagePoolCleanupEnabled) { - s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped."); + logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped."); return null; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java index a4edf76c105..51c7cb66f86 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java @@ -28,7 +28,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.capacity.dao.CapacityDao; @@ -47,7 +46,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Component @Local(value = StoragePoolAllocator.class) public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class); @Inject StoragePoolHostDao _poolHostDao; @@ -64,18 +62,18 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm"); + logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm"); if (!dskCh.useLocalStorage()) { return null; } - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { // Log the pools details that are ignored because they are in disabled state List disabledPools = _storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST); if (disabledPools != null && !disabledPools.isEmpty()) { for (StoragePoolVO pool : disabledPools) { - s_logger.trace("Ignoring pool " + pool + " as it is in disabled state."); + logger.trace("Ignoring pool " + pool + " as it is in disabled state."); } } } @@ -89,7 +87,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { if (pool != null && pool.isLocal()) { StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); + logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); suitablePools.add(storagePool); } 
else { avoid.addPool(pool.getId()); @@ -128,8 +126,8 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools"); + if (logger.isDebugEnabled()) { + logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools"); } return suitablePools; diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index 7a109669ab7..229bc683320 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -23,7 +23,6 @@ import java.util.Map; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -41,7 +40,6 @@ import com.cloud.vm.VirtualMachineProfile; @Component public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class); @Inject PrimaryDataStoreDao _storagePoolDao; @Inject @@ -50,18 +48,18 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool"); + logger.debug("ZoneWideStoragePoolAllocator to find storage pool"); if (dskCh.useLocalStorage()) { return null; } - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { // Log the pools details that are ignored because they are in disabled state List 
disabledPools = _storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, ScopeType.ZONE); if (disabledPools != null && !disabledPools.isEmpty()) { for (StoragePoolVO pool : disabledPools) { - s_logger.trace("Ignoring pool " + pool + " as it is in disabled state."); + logger.trace("Ignoring pool " + pool + " as it is in disabled state."); } } } @@ -114,8 +112,8 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { long dcId = plan.getDataCenterId(); List poolIdsByVolCount = _volumeDao.listZoneWidePoolIdsByVolumeCount(dcId, account.getAccountId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount); + if (logger.isDebugEnabled()) { + logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount); } // now filter the given list of Pools by this ordered list diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java index 6e36514c491..47ebd27ff89 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java @@ -30,7 +30,6 @@ import java.util.concurrent.CopyOnWriteArrayList; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.StorageProviderResponse; @@ -48,7 +47,6 @@ import com.cloud.utils.component.Registry; @Component public class DataStoreProviderManagerImpl extends ManagerBase implements DataStoreProviderManager, Registry { - private static final Logger 
s_logger = Logger.getLogger(DataStoreProviderManagerImpl.class); List providers; protected Map providerMap = new ConcurrentHashMap(); @@ -123,18 +121,18 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto String providerName = provider.getName(); if (providerMap.get(providerName) != null) { - s_logger.debug("Did not register data store provider, provider name: " + providerName + " is not unique"); + logger.debug("Did not register data store provider, provider name: " + providerName + " is not unique"); return false; } - s_logger.debug("registering data store provider:" + provider.getName()); + logger.debug("registering data store provider:" + provider.getName()); providerMap.put(providerName, provider); try { boolean registrationResult = provider.configure(copyParams); if (!registrationResult) { providerMap.remove(providerName); - s_logger.debug("Failed to register data store provider: " + providerName); + logger.debug("Failed to register data store provider: " + providerName); return false; } @@ -146,7 +144,7 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto imageStoreProviderMgr.registerDriver(provider.getName(), (ImageStoreDriver)provider.getDataStoreDriver()); } } catch (Exception e) { - s_logger.debug("configure provider failed", e); + logger.debug("configure provider failed", e); providerMap.remove(providerName); return false; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java index a492e76b991..4b13c100a44 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java @@ -21,7 +21,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import 
org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -36,7 +35,6 @@ import com.cloud.utils.db.UpdateBuilder; @Component public class ObjectInDataStoreDaoImpl extends GenericDaoBase implements ObjectInDataStoreDao { - private static final Logger s_logger = Logger.getLogger(ObjectInDataStoreDaoImpl.class); private SearchBuilder updateStateSearch; @Override @@ -69,7 +67,7 @@ public class ObjectInDataStoreDaoImpl extends GenericDaoBase 0; diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java index 142cd669cb3..25052668355 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java @@ -25,7 +25,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -46,7 +45,6 @@ import com.cloud.utils.db.UpdateBuilder; @Component public class SnapshotDataStoreDaoImpl extends GenericDaoBase implements SnapshotDataStoreDao { - private static final Logger s_logger = Logger.getLogger(SnapshotDataStoreDaoImpl.class); private SearchBuilder updateStateSearch; private SearchBuilder storeSearch; private SearchBuilder destroyedSearch; @@ -140,7 +138,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase 0; @@ -234,7 +232,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase snapshots = listBy(sc); // create an entry for each record, but with empty install path since the content is not yet on region-wide store yet if (snapshots != null) { - s_logger.info("Duplicate " + snapshots.size() + " snapshot cache store records to region store"); + logger.info("Duplicate " + snapshots.size() + " snapshot cache store 
records to region store"); for (SnapshotDataStoreVO snap : snapshots) { SnapshotDataStoreVO snapStore = findByStoreSnapshot(DataStoreRole.Image, storeId, snap.getSnapshotId()); if (snapStore != null) { - s_logger.info("There is already entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId); + logger.info("There is already entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId); continue; } - s_logger.info("Persisting an entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId); + logger.info("Persisting an entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId); SnapshotDataStoreVO ss = new SnapshotDataStoreVO(); ss.setSnapshotId(snap.getSnapshotId()); ss.setDataStoreId(storeId); @@ -377,7 +375,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase snaps = listBy(sc); if (snaps != null) { - s_logger.info("Update to cache store role for " + snaps.size() + " entries in snapshot_store_ref"); + logger.info("Update to cache store role for " + snaps.size() + " entries in snapshot_store_ref"); for (SnapshotDataStoreVO snap : snaps) { snap.setRole(DataStoreRole.ImageCache); update(snap.getId(), snap); diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java index 066503b0208..7f63261abab 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java @@ -26,7 +26,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -56,7 +55,6 @@ import 
com.cloud.utils.exception.CloudRuntimeException; @Component public class TemplateDataStoreDaoImpl extends GenericDaoBase implements TemplateDataStoreDao { - private static final Logger s_logger = Logger.getLogger(TemplateDataStoreDaoImpl.class); private SearchBuilder updateStateSearch; private SearchBuilder storeSearch; private SearchBuilder cacheSearch; @@ -174,7 +172,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase 0; @@ -457,7 +455,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase tmpls = listBy(sc); // create an entry for each template record, but with empty install path since the content is not yet on region-wide store yet if (tmpls != null) { - s_logger.info("Duplicate " + tmpls.size() + " template cache store records to region store"); + logger.info("Duplicate " + tmpls.size() + " template cache store records to region store"); for (TemplateDataStoreVO tmpl : tmpls) { long templateId = tmpl.getTemplateId(); VMTemplateVO template = _tmpltDao.findById(templateId); @@ -465,15 +463,15 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase tmpls = listBy(sc); if (tmpls != null) { - s_logger.info("Update to cache store role for " + tmpls.size() + " entries in template_store_ref"); + logger.info("Update to cache store role for " + tmpls.size() + " entries in template_store_ref"); for (TemplateDataStoreVO tmpl : tmpls) { tmpl.setDataStoreRole(DataStoreRole.ImageCache); update(tmpl.getId(), tmpl); @@ -537,7 +535,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase implements VolumeDataStoreDao { - private static final Logger s_logger = Logger.getLogger(VolumeDataStoreDaoImpl.class); private SearchBuilder updateStateSearch; private SearchBuilder volumeSearch; private SearchBuilder storeSearch; @@ -141,7 +139,7 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase 0; @@ -281,14 +279,14 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase implements TemplatePrimaryDataStoreDao { - private static 
final Logger s_logger = Logger.getLogger(TemplatePrimaryDataStoreDaoImpl.class); protected final SearchBuilder updateSearchBuilder; public TemplatePrimaryDataStoreDaoImpl() { @@ -81,7 +79,7 @@ public class TemplatePrimaryDataStoreDaoImpl extends GenericDaoBase 0; diff --git a/framework/cluster/src/com/cloud/cluster/ClusterFenceManagerImpl.java b/framework/cluster/src/com/cloud/cluster/ClusterFenceManagerImpl.java index 3860ad45514..c921f7f8446 100644 --- a/framework/cluster/src/com/cloud/cluster/ClusterFenceManagerImpl.java +++ b/framework/cluster/src/com/cloud/cluster/ClusterFenceManagerImpl.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.component.ManagerBase; @@ -31,7 +30,6 @@ import com.cloud.utils.component.ManagerBase; @Component @Local(value = {ClusterFenceManager.class}) public class ClusterFenceManagerImpl extends ManagerBase implements ClusterFenceManager, ClusterManagerListener { - private static final Logger s_logger = Logger.getLogger(ClusterFenceManagerImpl.class); @Inject ClusterManager _clusterMgr; @@ -52,7 +50,7 @@ public class ClusterFenceManagerImpl extends ManagerBase implements ClusterFence @Override public void onManagementNodeIsolated() { - s_logger.error("Received node isolation notification, will perform self-fencing and shut myself down"); + logger.error("Received node isolation notification, will perform self-fencing and shut myself down"); System.exit(SELF_FENCING_EXIT_CODE); } } diff --git a/framework/cluster/src/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/com/cloud/cluster/ClusterManagerImpl.java index ecf3aacb0b8..8dad8ff3507 100644 --- a/framework/cluster/src/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/com/cloud/cluster/ClusterManagerImpl.java @@ -46,7 +46,6 @@ import 
org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.cluster.dao.ManagementServerHostPeerDao; @@ -70,7 +69,6 @@ import com.cloud.utils.net.NetUtils; @Local(value = {ClusterManager.class}) public class ClusterManagerImpl extends ManagerBase implements ClusterManager, Configurable { - private static final Logger s_logger = Logger.getLogger(ClusterManagerImpl.class); private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1000; // 1 second private static final int DEFAULT_OUTGOING_WORKERS = 5; @@ -167,7 +165,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } for (final ClusterServiceRequestPdu pdu : candidates) { - s_logger.warn("Cancel cluster request PDU to peer: " + strPeer + ", pdu: " + pdu.getJsonPackage()); + logger.warn("Cancel cluster request PDU to peer: " + strPeer + ", pdu: " + pdu.getJsonPackage()); synchronized (pdu) { pdu.notifyAll(); } @@ -251,13 +249,13 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C try { peerService = getPeerService(pdu.getDestPeer()); } catch (final RemoteException e) { - s_logger.error("Unable to get cluster service on peer : " + pdu.getDestPeer()); + logger.error("Unable to get cluster service on peer : " + pdu.getDestPeer()); } if (peerService != null) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + ". agent: " + pdu.getAgentId() + ", pdu seq: " + + if (logger.isDebugEnabled()) { + logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + ". 
agent: " + pdu.getAgentId() + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage()); } @@ -267,8 +265,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C final String strResult = peerService.execute(pdu); profiler.stop(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + " completed. time: " + + if (logger.isDebugEnabled()) { + logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + " completed. time: " + profiler.getDurationInMillis() + "ms. agent: " + pdu.getAgentId() + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage()); } @@ -279,15 +277,15 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } catch (final RemoteException e) { invalidatePeerService(pdu.getDestPeer()); - if (s_logger.isInfoEnabled()) { - s_logger.info("Exception on remote execution, peer: " + pdu.getDestPeer() + ", iteration: " + i + ", exception message :" + + if (logger.isInfoEnabled()) { + logger.info("Exception on remote execution, peer: " + pdu.getDestPeer() + ", iteration: " + i + ", exception message :" + e.getMessage()); } } } } } catch (final Throwable e) { - s_logger.error("Unexcpeted exception: ", e); + logger.error("Unexpected exception: ", e); } } } @@ -311,7 +309,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C requestPdu.notifyAll(); } } else { - s_logger.warn("Original request has already been cancelled. 
pdu: " + pdu.getJsonPackage()); } } else { String result = _dispatcher.dispatch(pdu); @@ -333,7 +331,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } }); } catch (final Throwable e) { - s_logger.error("Unexcpeted exception: ", e); + logger.error("Unexpected exception: ", e); } } } @@ -366,12 +364,12 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C continue; // Skip myself. } try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Forwarding " + cmds + " to " + peer.getMsid()); + if (logger.isDebugEnabled()) { + logger.debug("Forwarding " + cmds + " to " + peer.getMsid()); } executeAsync(peerName, agentId, cmds, true); } catch (final Exception e) { - s_logger.warn("Caught exception while talkign to " + peer.getMsid()); + logger.warn("Caught exception while talking to " + peer.getMsid()); } } } @@ -388,8 +386,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @Override public String execute(final String strPeer, final long agentId, final String cmds, final boolean stopOnError) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " + cmds); + if (logger.isDebugEnabled()) { + logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " + cmds); } final ClusterServiceRequestPdu pdu = new ClusterServiceRequestPdu(); @@ -408,8 +406,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } } - if (s_logger.isDebugEnabled()) { - s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " completed. result: " + pdu.getResponseResult()); + if (logger.isDebugEnabled()) { + logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " completed. 
result: " + pdu.getResponseResult()); } if (pdu.getResponseResult() != null && pdu.getResponseResult().length() > 0) { @@ -438,7 +436,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C // Note : we don't check duplicates synchronized (_listeners) { - s_logger.info("register cluster listener " + listener.getClass()); + logger.info("register cluster listener " + listener.getClass()); _listeners.add(listener); } @@ -447,18 +445,18 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @Override public void unregisterListener(final ClusterManagerListener listener) { synchronized (_listeners) { - s_logger.info("unregister cluster listener " + listener.getClass()); + logger.info("unregister cluster listener " + listener.getClass()); _listeners.remove(listener); } } public void notifyNodeJoined(final List nodeList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notify management server node join to listeners."); + if (logger.isDebugEnabled()) { + logger.debug("Notify management server node join to listeners."); for (final ManagementServerHostVO mshost : nodeList) { - s_logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); } } @@ -472,13 +470,13 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } public void notifyNodeLeft(final List nodeList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notify management server node left to listeners."); + if (logger.isDebugEnabled()) { + logger.debug("Notify management server node left to listeners."); } for (final ManagementServerHostVO mshost : nodeList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + if (logger.isDebugEnabled()) { + logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + 
mshost.getMsid()); } cancelClusterRequestToPeer(String.valueOf(mshost.getMsid())); } @@ -493,8 +491,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } public void notifyNodeIsolated() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notify management server node isolation to listeners"); + if (logger.isDebugEnabled()) { + logger.debug("Notify management server node isolation to listeners"); } synchronized (_listeners) { @@ -549,16 +547,16 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C profilerHeartbeatUpdate.start(); txn.transitToUserManagedConnection(getHeartbeatConnection()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Cluster manager heartbeat update, id:" + _mshostId); + if (logger.isTraceEnabled()) { + logger.trace("Cluster manager heartbeat update, id:" + _mshostId); } _mshostDao.update(_mshostId, _runId, DateUtil.currentGMTTime()); profilerHeartbeatUpdate.stop(); profilerPeerScan.start(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Cluster manager peer-scan, id:" + _mshostId); + if (logger.isTraceEnabled()) { + logger.trace("Cluster manager peer-scan, id:" + _mshostId); } if (!_peerScanInited) { @@ -573,18 +571,18 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C profiler.stop(); if (profiler.getDurationInMillis() >= HeartbeatInterval.value()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Management server heartbeat takes too long to finish. profiler: " + profiler.toString() + ", profilerHeartbeatUpdate: " + + if (logger.isDebugEnabled()) { + logger.debug("Management server heartbeat takes too long to finish. 
profiler: " + profiler.toString() + ", profilerHeartbeatUpdate: " + profilerHeartbeatUpdate.toString() + ", profilerPeerScan: " + profilerPeerScan.toString()); } } } } catch (final CloudRuntimeException e) { - s_logger.error("Runtime DB exception ", e.getCause()); + logger.error("Runtime DB exception ", e.getCause()); if (e.getCause() instanceof ClusterInvalidSessionException) { - s_logger.error("Invalid cluster session found, fence it"); + logger.error("Invalid cluster session found, fence it"); queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated)); } @@ -594,7 +592,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } catch (final ActiveFencingException e) { queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated)); } catch (final Throwable e) { - s_logger.error("Unexpected exception in cluster heartbeat", e); + logger.error("Unexpected exception in cluster heartbeat", e); if (isRootCauseConnectionRelated(e.getCause())) { invalidHeartbeatConnection(); } @@ -633,7 +631,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (conn != null) { _heartbeatConnection.reset(conn); } else { - s_logger.error("DB communication problem detected, fence it"); + logger.error("DB communication problem detected, fence it"); queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated)); } // The stand-alone connection does not have to be closed here because there will be another reference to it. 
@@ -666,11 +664,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C profiler.stop(); if (profiler.getDurationInMillis() > 1000) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying management server join event took " + profiler.getDurationInMillis() + " ms"); + if (logger.isDebugEnabled()) { + logger.debug("Notifying management server join event took " + profiler.getDurationInMillis() + " ms"); } } else { - s_logger.warn("Notifying management server join event took " + profiler.getDurationInMillis() + " ms"); + logger.warn("Notifying management server join event took " + profiler.getDurationInMillis() + " ms"); } } break; @@ -684,11 +682,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C profiler.stop(); if (profiler.getDurationInMillis() > 1000) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms"); + if (logger.isDebugEnabled()) { + logger.debug("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms"); } } else { - s_logger.warn("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms"); + logger.warn("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms"); } } break; @@ -703,7 +701,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } } catch (final Throwable e) { - s_logger.warn("Unexpected exception during cluster notification. ", e); + logger.warn("Unexpected exception during cluster notification. 
", e); } } @@ -770,18 +768,18 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (orphanList.size() > 0) { for (final Long orphanMsid : orphanList) { // construct fake ManagementServerHostVO based on orphan MSID - s_logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid); + logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid); inactiveList.add(new ManagementServerHostVO(orphanMsid, 0, "orphan", 0, new Date())); } } else { - s_logger.info("We are good, no orphan management server msid in host table is found"); + logger.info("We are good, no orphan management server msid in host table is found"); } if (inactiveList.size() > 0) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp"); + if (logger.isInfoEnabled()) { + logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp"); for (final ManagementServerHostVO host : inactiveList) { - s_logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + + logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + ", version: " + host.getVersion()); } } @@ -789,7 +787,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C final List downHostList = new ArrayList(); for (final ManagementServerHostVO host : inactiveList) { if (!pingManagementNode(host)) { - s_logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable"); + logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable"); downHostList.add(host); } } @@ -798,7 +796,7 @@ public class 
ClusterManagerImpl extends ManagerBase implements ClusterManager, C queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeRemoved, downHostList)); } } else { - s_logger.info("No inactive management server node found"); + logger.info("No inactive management server node found"); } } @@ -823,7 +821,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (_mshostPeerDao.countStateSeenInPeers(_mshostId, _runId, ManagementServerHost.State.Down) > 0) { final String msg = "We have detected that at least one management server peer reports that this management server is down, perform active fencing to avoid split-brain situation"; - s_logger.error(msg); + logger.error(msg); throw new ActiveFencingException(msg); } @@ -833,24 +831,24 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C final ManagementServerHostVO current = getInListById(entry.getKey(), currentList); if (current == null) { if (entry.getKey().longValue() != _mshostId.longValue()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + if (logger.isDebugEnabled()) { + logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); } removedNodeList.add(entry.getValue()); } } else { if (current.getRunid() == 0) { if (entry.getKey().longValue() != _mshostId.longValue()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" + + if (logger.isDebugEnabled()) { + logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); } invalidatedNodeList.add(entry.getValue()); } } else { if (entry.getValue().getRunid() != current.getRunid()) { - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Detected management node left and rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + if (logger.isDebugEnabled()) { + logger.debug("Detected management node left and rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); } entry.getValue().setRunid(current.getRunid()); @@ -870,7 +868,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C try { JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId()); } catch (final Exception e) { - s_logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); + logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); } } @@ -885,15 +883,15 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C while (it.hasNext()) { final ManagementServerHostVO mshost = it.next(); if (!pingManagementNode(mshost)) { - s_logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and also not pingable"); + logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and also not pingable"); _activePeers.remove(mshost.getId()); try { JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId()); } catch (final Exception e) { - s_logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); + logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); } } else { - s_logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but is pingable"); + logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but is pingable"); it.remove(); } } @@ -908,15 +906,15 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (!_activePeers.containsKey(mshost.getId())) { 
_activePeers.put(mshost.getId(), mshost); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP()); + if (logger.isDebugEnabled()) { + logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP()); } newNodeList.add(mshost); try { JmxUtil.registerMBean("ClusterManager", "Node " + mshost.getId(), new ClusterManagerMBeanImpl(this, mshost)); } catch (final Exception e) { - s_logger.warn("Unable to regiester cluster node into JMX monitoring due to exception " + ExceptionUtil.toString(e)); + logger.warn("Unable to regiester cluster node into JMX monitoring due to exception " + ExceptionUtil.toString(e)); } } } @@ -928,8 +926,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C profiler.stop(); if (profiler.getDurationInMillis() >= HeartbeatInterval.value()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString() + ", profilerQueryActiveList: " + + if (logger.isDebugEnabled()) { + logger.debug("Peer scan takes too long to finish. 
profiler: " + profiler.toString() + ", profilerQueryActiveList: " + profilerQueryActiveList.toString() + ", profilerSyncClusterInfo: " + profilerSyncClusterInfo.toString() + ", profilerInvalidatedNodeList: " + profilerInvalidatedNodeList.toString() + ", profilerRemovedList: " + profilerRemovedList.toString()); } @@ -948,8 +946,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @Override @DB public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Starting Cluster manager, msid : " + _msId); + if (logger.isInfoEnabled()) { + logger.info("Starting Cluster manager, msid : " + _msId); } final ManagementServerHostVO mshost = Transaction.execute(new TransactionCallback() { @@ -973,14 +971,14 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C mshost.setAlertCount(0); mshost.setState(ManagementServerHost.State.Up); _mshostDao.persist(mshost); - if (s_logger.isInfoEnabled()) { - s_logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started"); + if (logger.isInfoEnabled()) { + logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started"); } } else { _mshostDao.update(mshost.getId(), _runId, NetUtils.getHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(), DateUtil.currentGMTTime()); - if (s_logger.isInfoEnabled()) { - s_logger.info("Management server " + _msId + ", runId " + _runId + " is being started"); + if (logger.isInfoEnabled()) { + logger.info("Management server " + _msId + ", runId " + _runId + " is being started"); } } @@ -989,8 +987,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C }); _mshostId = mshost.getId(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort()); + if (logger.isInfoEnabled()) { + 
logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort()); } _mshostPeerDao.clearPeerInfo(_mshostId); @@ -999,8 +997,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C _heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HeartbeatInterval.value(), HeartbeatInterval.value(), TimeUnit.MILLISECONDS); _notificationExecutor.submit(getNotificationTask()); - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster manager was started successfully"); + if (logger.isInfoEnabled()) { + logger.info("Cluster manager was started successfully"); } return true; @@ -1009,8 +1007,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @Override @DB public boolean stop() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Stopping Cluster manager, msid : " + _msId); + if (logger.isInfoEnabled()) { + logger.info("Stopping Cluster manager, msid : " + _msId); } if (_mshostId != null) { @@ -1028,8 +1026,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } catch (final InterruptedException e) { } - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster manager is stopped"); + if (logger.isInfoEnabled()) { + logger.info("Cluster manager is stopped"); } return true; @@ -1037,8 +1035,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @Override public boolean configure(final String name, final Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring cluster manager : " + name); + if (logger.isInfoEnabled()) { + logger.info("Start configuring cluster manager : " + name); } final Properties dbProps = DbProperties.getDbProperties(); @@ -1048,8 +1046,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } _clusterNodeIP = _clusterNodeIP.trim(); - if (s_logger.isInfoEnabled()) { 
- s_logger.info("Cluster node IP : " + _clusterNodeIP); + if (logger.isInfoEnabled()) { + logger.info("Cluster node IP : " + _clusterNodeIP); } if (!NetUtils.isLocalAddress(_clusterNodeIP)) { @@ -1074,8 +1072,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C checkConflicts(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster manager is configured."); + if (logger.isInfoEnabled()) { + logger.info("Cluster manager is configured."); } return true; } @@ -1133,7 +1131,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C final String targetIp = mshost.getServiceIP(); if ("127.0.0.1".equals(targetIp) || "0.0.0.0".equals(targetIp)) { - s_logger.info("ping management node cluster service can not be performed on self"); + logger.info("ping management node cluster service can not be performed on self"); return false; } @@ -1141,7 +1139,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C while (--retry > 0) { SocketChannel sch = null; try { - s_logger.info("Trying to connect to " + targetIp); + logger.info("Trying to connect to " + targetIp); sch = SocketChannel.open(); sch.configureBlocking(true); sch.socket().setSoTimeout(5000); @@ -1151,7 +1149,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C return true; } catch (final IOException e) { if (e instanceof ConnectException) { - s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException", e); + logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException", e); return false; } } finally { @@ -1169,7 +1167,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } } - s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " after retries"); + logger.error("Unable to ping management 
server at " + targetIp + ":" + mshost.getServicePort() + " after retries"); return false; } @@ -1186,25 +1184,25 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if ("127.0.0.1".equals(_clusterNodeIP)) { if (pingManagementNode(peer.getMsid())) { final String msg = "Detected another management node with localhost IP is already running, please check your cluster configuration"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } else { final String msg = "Detected another management node with localhost IP is considered as running in DB, however it is not pingable, we will continue cluster initialization with this management server node"; - s_logger.info(msg); + logger.info(msg); } } else { if (pingManagementNode(peer.getMsid())) { final String msg = "Detected that another management node with the same IP " + peer.getServiceIP() + " is already running, please check your cluster configuration"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } else { final String msg = "Detected that another management node with the same IP " + peer.getServiceIP() + " is considered as running in DB, however it is not pingable, we will continue cluster initialization with this management server node"; - s_logger.info(msg); + logger.info(msg); } } } diff --git a/framework/cluster/src/com/cloud/cluster/ClusterServiceServletAdapter.java b/framework/cluster/src/com/cloud/cluster/ClusterServiceServletAdapter.java index 7451b5f4226..937ef4a6249 100644 --- a/framework/cluster/src/com/cloud/cluster/ClusterServiceServletAdapter.java +++ b/framework/cluster/src/com/cloud/cluster/ClusterServiceServletAdapter.java @@ -23,7 +23,6 @@ import java.util.Properties; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.ConfigDepot; import com.cloud.cluster.dao.ManagementServerHostDao; @@ -34,7 +33,6 @@ 
import com.cloud.utils.db.DbProperties; public class ClusterServiceServletAdapter extends AdapterBase implements ClusterServiceAdapter { - private static final Logger s_logger = Logger.getLogger(ClusterServiceServletAdapter.class); private static final int DEFAULT_SERVICE_PORT = 9090; private static final int DEFAULT_REQUEST_TIMEOUT = 300; // 300 seconds @@ -59,7 +57,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster try { init(); } catch (ConfigurationException e) { - s_logger.error("Unable to init ClusterServiceServletAdapter"); + logger.error("Unable to init ClusterServiceServletAdapter"); throw new RemoteException("Unable to init ClusterServiceServletAdapter"); } @@ -75,7 +73,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster try { init(); } catch (ConfigurationException e) { - s_logger.error("Unable to init ClusterServiceServletAdapter"); + logger.error("Unable to init ClusterServiceServletAdapter"); return null; } @@ -126,7 +124,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster Properties dbProps = DbProperties.getDbProperties(); _clusterServicePort = NumbersUtil.parseInt(dbProps.getProperty("cluster.servlet.port"), DEFAULT_SERVICE_PORT); - if (s_logger.isInfoEnabled()) - s_logger.info("Cluster servlet port : " + _clusterServicePort); + if (logger.isInfoEnabled()) + logger.info("Cluster servlet port : " + _clusterServicePort); } } diff --git a/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java b/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java index ce4667ecc00..3fe94c28218 100644 --- a/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java +++ b/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java @@ -26,7 +26,6 @@ import java.util.TimeZone; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.cluster.ClusterInvalidSessionException; 
import com.cloud.cluster.ManagementServerHost; @@ -43,7 +42,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Local(value = {ManagementServerHostDao.class}) public class ManagementServerHostDaoImpl extends GenericDaoBase implements ManagementServerHostDao { - private static final Logger s_logger = Logger.getLogger(ManagementServerHostDaoImpl.class); private final SearchBuilder MsIdSearch; private final SearchBuilder ActiveSearch; @@ -100,7 +98,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase implements ManagementServerHostPeerDao { - private static final Logger s_logger = Logger.getLogger(ManagementServerHostPeerDaoImpl.class); private final SearchBuilder ClearPeerSearch; private final SearchBuilder FindForUpdateSearch; @@ -87,7 +85,7 @@ public class ManagementServerHostPeerDaoImpl extends GenericDaoBase implements ConfigurationDao { - private static final Logger s_logger = Logger.getLogger(ConfigurationDaoImpl.class); private Map _configs = null; private boolean _premium; @@ -148,7 +146,7 @@ public class ConfigurationDaoImpl extends GenericDaoBase extends ComponentLifecycleBase implements GenericDao, ComponentMethodInterceptable { - private final static Logger s_logger = Logger.getLogger(GenericDaoBase.class); protected final static TimeZone s_gmtTimeZone = TimeZone.getTimeZone("GMT"); @@ -255,26 +253,26 @@ public abstract class GenericDaoBase extends Compone _searchEnhancer.setSuperclass(_entityBeanType); _searchEnhancer.setCallback(new UpdateBuilder(this)); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Select SQL: " + _partialSelectSql.first().toString()); - s_logger.trace("Remove SQL: " + (_removeSql != null ? 
_removeSql.first() : "No remove sql")); - s_logger.trace("Select by Id SQL: " + _selectByIdSql); - s_logger.trace("Table References: " + _tables); - s_logger.trace("Insert SQLs:"); + if (logger.isTraceEnabled()) { + logger.trace("Select SQL: " + _partialSelectSql.first().toString()); + logger.trace("Remove SQL: " + (_removeSql != null ? _removeSql.first() : "No remove sql")); + logger.trace("Select by Id SQL: " + _selectByIdSql); + logger.trace("Table References: " + _tables); + logger.trace("Insert SQLs:"); for (final Pair insertSql : _insertSqls) { - s_logger.trace(insertSql.first()); + logger.trace(insertSql.first()); } - s_logger.trace("Delete SQLs"); + logger.trace("Delete SQLs"); for (final Pair deletSql : _deleteSqls) { - s_logger.trace(deletSql.first()); + logger.trace(deletSql.first()); } - s_logger.trace("Collection SQLs"); + logger.trace("Collection SQLs"); for (Attribute attr : _ecAttributes) { EcInfo info = (EcInfo)attr.attache; - s_logger.trace(info.insertSql); - s_logger.trace(info.selectSql); + logger.trace(info.insertSql); + logger.trace(info.selectSql); } } @@ -413,7 +411,7 @@ public abstract class GenericDaoBase extends Compone } } - if (s_logger.isDebugEnabled() && lock != null) { + if (logger.isDebugEnabled() && lock != null) { txn.registerLock(pstmt.toString()); } final ResultSet rs = pstmt.executeQuery(); @@ -778,8 +776,8 @@ public abstract class GenericDaoBase extends Compone } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("join search statement is " + pstmt); + if (logger.isTraceEnabled()) { + logger.trace("join search statement is " + pstmt); } return count; } @@ -1597,7 +1595,7 @@ public abstract class GenericDaoBase extends Compone try { _cache.put(new Element(_idField.get(entity), entity)); } catch (final Exception e) { - s_logger.debug("Can't put it in the cache", e); + logger.debug("Can't put it in the cache", e); } } @@ -1619,7 +1617,7 @@ public abstract class GenericDaoBase extends Compone try { _cache.put(new 
Element(_idField.get(entity), entity)); } catch (final Exception e) { - s_logger.debug("Can't put it in the cache", e); + logger.debug("Can't put it in the cache", e); } } @@ -1798,7 +1796,7 @@ public abstract class GenericDaoBase extends Compone final int idle = NumbersUtil.parseInt((String)params.get("cache.time.to.idle"), 300); _cache = new Cache(getName(), maxElements, false, live == -1, live == -1 ? Integer.MAX_VALUE : live, idle); cm.addCache(_cache); - s_logger.info("Cache created: " + _cache.toString()); + logger.info("Cache created: " + _cache.toString()); } else { _cache = null; } diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java index 26288d53b23..22556e8f236 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java @@ -21,7 +21,6 @@ import java.sql.SQLException; import java.util.Date; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.jobs.JobInfo; @@ -35,7 +34,6 @@ import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; public class AsyncJobDaoImpl extends GenericDaoBase implements AsyncJobDao { - private static final Logger s_logger = Logger.getLogger(AsyncJobDaoImpl.class.getName()); private final SearchBuilder pendingAsyncJobSearch; private final SearchBuilder pendingAsyncJobsSearch; @@ -105,7 +103,7 @@ public class AsyncJobDaoImpl extends GenericDaoBase implements List l = listIncludingRemovedBy(sc); if (l != null && l.size() > 0) { if (l.size() > 1) { - s_logger.warn("Instance " + instanceType + "-" + instanceId + " has multiple pending async-job"); + logger.warn("Instance " + instanceType + "-" + instanceId + " has multiple pending async-job"); } return l.get(0); @@ -192,9 
+190,9 @@ public class AsyncJobDaoImpl extends GenericDaoBase implements pstmt.setLong(6, msid); pstmt.execute(); } catch (SQLException e) { - s_logger.warn("Unable to reset job status for management server " + msid, e); + logger.warn("Unable to reset job status for management server " + msid, e); } catch (Throwable e) { - s_logger.warn("Unable to reset job status for management server " + msid, e); + logger.warn("Unable to reset job status for management server " + msid, e); } } diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java index d70864c755b..da7ba36c919 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java @@ -24,7 +24,6 @@ import java.util.Date; import java.util.List; import java.util.TimeZone; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.AsyncJobJoinMapVO; import org.apache.cloudstack.jobs.JobInfo; @@ -39,7 +38,6 @@ import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; public class AsyncJobJoinMapDaoImpl extends GenericDaoBase implements AsyncJobJoinMapDao { - public static final Logger s_logger = Logger.getLogger(AsyncJobJoinMapDaoImpl.class); private final SearchBuilder RecordSearch; private final SearchBuilder RecordSearchByOwner; @@ -202,7 +200,7 @@ public class AsyncJobJoinMapDaoImpl extends GenericDaoBase implements SyncQueueDao { - private static final Logger s_logger = Logger.getLogger(SyncQueueDaoImpl.class.getName()); SearchBuilder TypeIdSearch = createSearchBuilder(); @@ -60,9 +58,9 @@ public class SyncQueueDaoImpl extends GenericDaoBase implemen pstmt.setString(4, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), dt)); pstmt.execute(); } catch (SQLException e) { - 
s_logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e); + logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e); } catch (Throwable e) { - s_logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e); + logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e); } } diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java index 29c3f1b289f..756cbb7efb0 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java @@ -25,7 +25,6 @@ import java.util.Date; import java.util.List; import java.util.TimeZone; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.SyncQueueItemVO; @@ -42,7 +41,6 @@ import com.cloud.utils.db.TransactionLegacy; @DB public class SyncQueueItemDaoImpl extends GenericDaoBase implements SyncQueueItemDao { - private static final Logger s_logger = Logger.getLogger(SyncQueueItemDaoImpl.class); final GenericSearchBuilder queueIdSearch; final GenericSearchBuilder queueActiveItemSearch; @@ -116,9 +114,9 @@ public class SyncQueueItemDaoImpl extends GenericDaoBase l.add(item); } } catch (SQLException e) { - s_logger.error("Unexpected sql exception, ", e); + logger.error("Unexpected sql exception, ", e); } catch (Throwable e) { - s_logger.error("Unexpected exception, ", e); + logger.error("Unexpected exception, ", e); } return l; } diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java index e81ab1ebbf7..ecefac2ffb1 100644 --- 
a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java @@ -24,7 +24,6 @@ import java.util.List; import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO.Step; @@ -43,7 +42,6 @@ import com.cloud.utils.db.TransactionStatus; import com.cloud.vm.VirtualMachine; public class VmWorkJobDaoImpl extends GenericDaoBase implements VmWorkJobDao { - private static final Logger s_logger = Logger.getLogger(VmWorkJobDaoImpl.class); protected SearchBuilder PendingWorkJobSearch; protected SearchBuilder PendingWorkJobByCommandSearch; @@ -145,8 +143,8 @@ public class VmWorkJobDaoImpl extends GenericDaoBase implemen sc.setParameters("dispatcher", "VmWorkJobDispatcher"); List expungeList = listBy(sc); for (VmWorkJobVO job : expungeList) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Expunge completed work job-" + job.getId()); + if (logger.isDebugEnabled()) + logger.debug("Expunge completed work job-" + job.getId()); expunge(job.getId()); _baseJobDao.expunge(job.getId()); } @@ -176,10 +174,10 @@ public class VmWorkJobDaoImpl extends GenericDaoBase implemen pstmt.execute(); } catch (SQLException e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "SQL failed to delete vm work job: " + e.getLocalizedMessage()); } catch (Throwable e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "caught an error during delete vm work job: " + e.getLocalizedMessage()); } @@ -191,10 +189,10 @@ public class VmWorkJobDaoImpl extends GenericDaoBase implemen pstmt.execute(); } catch (SQLException e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "SQL failed to delete async job: " + e.getLocalizedMessage()); } catch (Throwable e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "caught an 
error during delete async job: " + e.getLocalizedMessage()); } } diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java index 1ea3c781016..cd48c779942 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java @@ -33,7 +33,6 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.log4j.NDC; import org.apache.cloudstack.api.ApiErrorCode; @@ -93,7 +92,6 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, Integer.class, "vm.job.lock.timeout", "1800", "Time in seconds to wait in acquiring lock to submit a vm worker job", false); - private static final Logger s_logger = Logger.getLogger(AsyncJobManagerImpl.class); private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds @@ -179,8 +177,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, publishOnEventBus(job, "submit"); scheduleExecution(job, scheduleJobExecutionInContext); - if (s_logger.isDebugEnabled()) { - s_logger.debug("submit async job-" + job.getId() + ", details: " + StringUtils.cleanString(job.toString())); + if (logger.isDebugEnabled()) { + logger.debug("submit async job-" + job.getId() + ", details: " + StringUtils.cleanString(job.toString())); } return job.getId(); } @@ -216,7 +214,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, } } catch (Exception e) { String errMsg = "Unable to schedule async job for command " + job.getCmd() + ", unexpected exception."; - s_logger.warn(errMsg, e); + logger.warn(errMsg, e); throw new CloudRuntimeException(errMsg); } } @@ -224,14 +222,14 @@ public class AsyncJobManagerImpl 
extends ManagerBase implements AsyncJobManager, @Override @DB public void completeAsyncJob(final long jobId, final Status jobStatus, final int resultCode, final String resultObject) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObject); + if (logger.isDebugEnabled()) { + logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObject); } final AsyncJobVO job = _jobDao.findById(jobId); if (job == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus + ", resultCode: " + resultCode + ", result: " + + if (logger.isDebugEnabled()) { + logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObject); } // still purge item from queue to avoid any blocking @@ -240,8 +238,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, } if (job.getStatus() != JobInfo.Status.IN_PROGRESS) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " is already completed."); + if (logger.isDebugEnabled()) { + logger.debug("job-" + jobId + " is already completed."); } // still purge item from queue to avoid any blocking _queueMgr.purgeAsyncJobQueueItemId(jobId); @@ -252,18 +250,18 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, job.setResult(resultObject); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Publish async job-" + jobId + " complete on message bus"); + if (logger.isDebugEnabled()) { + logger.debug("Publish async job-" + jobId + " complete on message bus"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Wake up jobs related to job-" + jobId); + if (logger.isDebugEnabled()) { + logger.debug("Wake up jobs related 
to job-" + jobId); } List wakeupList = Transaction.execute(new TransactionCallback>() { @Override public List doInTransaction(TransactionStatus status) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Update db status for job-" + jobId); + if (logger.isDebugEnabled()) { + logger.debug("Update db status for job-" + jobId); } job.setCompleteMsid(getMsid()); job.setStatus(jobStatus); @@ -279,8 +277,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, job.setExecutingMsid(null); _jobDao.update(jobId, job); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Wake up jobs joined with job-" + jobId + " and disjoin all subjobs created from job- " + jobId); + if (logger.isDebugEnabled()) { + logger.debug("Wake up jobs joined with job-" + jobId + " and disjoin all subjobs created from job- " + jobId); } List wakeupList = wakeupByJoinedJobCompletion(jobId); _joinMapDao.disjoinAllJobs(jobId); @@ -311,14 +309,14 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @Override @DB public void updateAsyncJobStatus(final long jobId, final int processStatus, final String resultObject) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus + ", result: " + resultObject); + if (logger.isDebugEnabled()) { + logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus + ", result: " + resultObject); } final AsyncJobVO job = _jobDao.findById(jobId); if (job == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " no longer exists, we just log progress info here. progress status: " + processStatus); + if (logger.isDebugEnabled()) { + logger.debug("job-" + jobId + " no longer exists, we just log progress info here. 
progress status: " + processStatus); } return; @@ -341,8 +339,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @Override @DB public void updateAsyncJobAttachment(final long jobId, final String instanceType, final Long instanceId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType + ", instanceId: " + instanceId); + if (logger.isDebugEnabled()) { + logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType + ", instanceId: " + instanceId); } final AsyncJobVO job = _jobDao.findById(jobId); @@ -407,8 +405,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @Override public void syncAsyncJobExecution(AsyncJob job, String syncObjType, long syncObjId, long queueSizeLimit) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sync job-" + job.getId() + " execution on object " + syncObjType + "." + syncObjId); + if (logger.isDebugEnabled()) { + logger.debug("Sync job-" + job.getId() + " execution on object " + syncObjType + "." 
+ syncObjId); } SyncQueueVO queue = null; @@ -465,7 +463,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, return dispatcher; } } else { - s_logger.warn("job-" + job.getId() + " is scheduled for wakeup run, but there is no joining info anymore"); + logger.warn("job-" + job.getId() + " is scheduled for wakeup run, but there is no joining info anymore"); } } return null; @@ -510,16 +508,16 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, } catch (Exception e) { // Due to co-existence of normal-dispatched-job/wakeup-dispatched-job, MBean register() call // is expected to fail under situations - if (s_logger.isTraceEnabled()) - s_logger.trace("Unable to register active job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); + if (logger.isTraceEnabled()) + logger.trace("Unable to register active job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); } _jobMonitor.registerActiveTask(runNumber, job.getId()); AsyncJobExecutionContext.setCurrentExecutionContext(new AsyncJobExecutionContext(job)); // execute the job - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing " + StringUtils.cleanString(job.toString())); + if (logger.isDebugEnabled()) { + logger.debug("Executing " + StringUtils.cleanString(job.toString())); } if ((getAndResetPendingSignals(job) & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0) { @@ -528,25 +526,25 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, jobDispatcher.runJob(job); } else { // TODO, job wakeup is not in use yet - if (s_logger.isTraceEnabled()) - s_logger.trace("Unable to find a wakeup dispatcher from the joined job: " + job); + if (logger.isTraceEnabled()) + logger.trace("Unable to find a wakeup dispatcher from the joined job: " + job); } } else { AsyncJobDispatcher jobDispatcher = getDispatcher(job.getDispatcher()); if (jobDispatcher != null) { 
jobDispatcher.runJob(job); } else { - s_logger.error("Unable to find job dispatcher, job will be cancelled"); + logger.error("Unable to find job dispatcher, job will be cancelled"); completeAsyncJob(job.getId(), JobInfo.Status.FAILED, ApiErrorCode.INTERNAL_ERROR.getHttpCode(), null); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done executing " + job.getCmd() + " for job-" + job.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Done executing " + job.getCmd() + " for job-" + job.getId()); } } catch (Throwable e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); completeAsyncJob(job.getId(), JobInfo.Status.FAILED, ApiErrorCode.INTERNAL_ERROR.getHttpCode(), null); } finally { // guard final clause as well @@ -561,8 +559,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, } catch (Exception e) { // Due to co-existence of normal-dispatched-job/wakeup-dispatched-job, MBean unregister() call // is expected to fail under situations - if (s_logger.isTraceEnabled()) - s_logger.trace("Unable to unregister job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); + if (logger.isTraceEnabled()) + logger.trace("Unable to unregister job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); } // @@ -572,7 +570,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, _jobMonitor.unregisterActiveTask(runNumber); } catch (Throwable e) { - s_logger.error("Double exception", e); + logger.error("Double exception", e); } } } @@ -592,8 +590,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, private void executeQueueItem(SyncQueueItemVO item, boolean fromPreviousSession) { AsyncJobVO job = _jobDao.findById(item.getContentId()); if (job != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Schedule queued job-" + job.getId()); + if (logger.isDebugEnabled()) { 
+ logger.debug("Schedule queued job-" + job.getId()); } job.setSyncSource(item); @@ -607,37 +605,37 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, job.setExecutingMsid(getMsid()); _jobDao.update(job.getId(), job); } catch (Exception e) { - s_logger.warn("Unexpected exception while dispatching job-" + item.getContentId(), e); + logger.warn("Unexpected exception while dispatching job-" + item.getContentId(), e); try { _queueMgr.returnItem(item.getId()); } catch (Throwable thr) { - s_logger.error("Unexpected exception while returning job-" + item.getContentId() + " to queue", thr); + logger.error("Unexpected exception while returning job-" + item.getContentId() + " to queue", thr); } } try { scheduleExecution(job); } catch (RejectedExecutionException e) { - s_logger.warn("Execution for job-" + job.getId() + " is rejected, return it to the queue for next turn"); + logger.warn("Execution for job-" + job.getId() + " is rejected, return it to the queue for next turn"); try { _queueMgr.returnItem(item.getId()); } catch (Exception e2) { - s_logger.error("Unexpected exception while returning job-" + item.getContentId() + " to queue", e2); + logger.error("Unexpected exception while returning job-" + item.getContentId() + " to queue", e2); } try { job.setExecutingMsid(null); _jobDao.update(job.getId(), job); } catch (Exception e3) { - s_logger.warn("Unexpected exception while update job-" + item.getContentId() + " msid for bookkeeping"); + logger.warn("Unexpected exception while update job-" + item.getContentId() + " msid for bookkeeping"); } } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find related job for queue item: " + item.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find related job for queue item: " + item.toString()); } _queueMgr.purgeItem(item.getId()); @@ -650,8 +648,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, assert (executionContext 
!= null); if (executionContext.getSyncSource() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Release sync source for job-" + executionContext.getJob().getId() + " sync source: " + executionContext.getSyncSource().getContentType() + + if (logger.isDebugEnabled()) { + logger.debug("Release sync source for job-" + executionContext.getJob().getId() + " sync source: " + executionContext.getSyncSource().getContentType() + "-" + executionContext.getSyncSource().getContentId()); } @@ -708,8 +706,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, try { SyncQueueItemVO item = _queueMgr.dequeueFromOne(queueId, getMsid()); if (item != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing sync queue item: " + item.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Executing sync queue item: " + item.toString()); } executeQueueItem(item, false); @@ -717,7 +715,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, break; } } catch (Throwable e) { - s_logger.error("Unexpected exception when kicking sync queue-" + queueId, e); + logger.error("Unexpected exception when kicking sync queue-" + queueId, e); break; } } @@ -747,8 +745,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, List l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE); if (l != null && l.size() > 0) { for (SyncQueueItemVO item : l) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Execute sync-queue item: " + item.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Execute sync-queue item: " + item.toString()); } executeQueueItem(item, false); } @@ -762,7 +760,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, scheduleExecution(job, false); } } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to execute queue item, ", e); + logger.error("Unexpected exception when trying to execute 
queue item, ", e); } } }; @@ -789,7 +787,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, public void reallyRun() { try { - s_logger.info("Begin cleanup expired async-jobs"); + logger.info("Begin cleanup expired async-jobs"); // forcefully cancel blocking queue items if they've been staying there for too long List blockItems = _queueMgr.getBlockedQueueItems(JobCancelThresholdMinutes.value() * 60000, false); @@ -797,7 +795,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, for (SyncQueueItemVO item : blockItems) { try { if (item.getContentType().equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) { - s_logger.info("Remove Job-" + item.getContentId() + " from Queue-" + item.getId() + " since it has been blocked for too long"); + logger.info("Remove Job-" + item.getContentId() + " from Queue-" + item.getId() + " since it has been blocked for too long"); completeAsyncJob(item.getContentId(), JobInfo.Status.FAILED, 0, "Job is cancelled as it has been blocking others for too long"); _jobMonitor.unregisterByJobId(item.getContentId()); @@ -806,7 +804,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, // purge the item and resume queue processing _queueMgr.purgeItem(item.getId()); } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to remove job from sync queue, ", e); + logger.error("Unexpected exception when trying to remove job from sync queue, ", e); } } } @@ -818,12 +816,12 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, List unfinishedJobs = _jobDao.getExpiredUnfinishedJobs(cutTime, 100); for (AsyncJobVO job : unfinishedJobs) { try { - s_logger.info("Expunging unfinished job-" + job.getId()); + logger.info("Expunging unfinished job-" + job.getId()); _jobMonitor.unregisterByJobId(job.getId()); expungeAsyncJob(job); } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to expunge job-" 
+ job.getId(), e); + logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e); } } @@ -831,17 +829,17 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, List completedJobs = _jobDao.getExpiredCompletedJobs(cutTime, 100); for (AsyncJobVO job : completedJobs) { try { - s_logger.info("Expunging completed job-" + job.getId()); + logger.info("Expunging completed job-" + job.getId()); expungeAsyncJob(job); } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e); + logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e); } } - s_logger.info("End cleanup expired async-jobs"); + logger.info("End cleanup expired async-jobs"); } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to execute queue item, ", e); + logger.error("Unexpected exception when trying to execute queue item, ", e); } } }; @@ -933,10 +931,10 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, int apiPoolSize = cloudMaxActive / 2; int workPoolSize = (cloudMaxActive * 2) / 3; - s_logger.info("Start AsyncJobManager API executor thread pool in size " + apiPoolSize); + logger.info("Start AsyncJobManager API executor thread pool in size " + apiPoolSize); _apiJobExecutor = Executors.newFixedThreadPool(apiPoolSize, new NamedThreadFactory(AsyncJobManager.API_JOB_POOL_THREAD_PREFIX)); - s_logger.info("Start AsyncJobManager Work executor thread pool in size " + workPoolSize); + logger.info("Start AsyncJobManager Work executor thread pool in size " + workPoolSize); _workerJobExecutor = Executors.newFixedThreadPool(workPoolSize, new NamedThreadFactory(AsyncJobManager.WORK_JOB_POOL_THREAD_PREFIX)); } catch (final Exception e) { throw new ConfigurationException("Unable to load db.properties to configure AsyncJobManagerImpl"); @@ -983,22 +981,22 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, // 
reset job status for all jobs running on this ms node List jobs = _jobDao.getResetJobs(msid); for (AsyncJobVO job : jobs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cancel left-over job-" + job.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Cancel left-over job-" + job.getId()); } job.setStatus(JobInfo.Status.FAILED); job.setResultCode(ApiErrorCode.INTERNAL_ERROR.getHttpCode()); job.setResult("job cancelled because of management server restart or shutdown"); _jobDao.update(job.getId(), job); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Purge queue item for cancelled job-" + job.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Purge queue item for cancelled job-" + job.getId()); } _queueMgr.purgeAsyncJobQueueItemId(job.getId()); } } }); } catch (Throwable e) { - s_logger.warn("Unexpected exception in cleaning up left over jobs for mamagement server node " + msid, e); + logger.warn("Unexpected exception in cleaning up left over jobs for management server node " + msid, e); } } diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java index b1cac3e79a5..b2216cb7502 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java @@ -25,7 +25,6 @@ import java.util.concurrent.atomic.AtomicInteger; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobManager; @@ -37,7 +36,6 @@ import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import com.cloud.utils.component.ManagerBase; public class AsyncJobMonitor extends ManagerBase { - public static final Logger s_logger = Logger.getLogger(AsyncJobMonitor.class); @Inject private 
MessageBus _messageBus; @@ -86,7 +84,7 @@ public class AsyncJobMonitor extends ManagerBase { synchronized (this) { for (Map.Entry entry : _activeTasks.entrySet()) { if (entry.getValue().millisSinceLastJobHeartbeat() > _inactivityWarningThresholdMs) { - s_logger.warn("Task (job-" + entry.getValue().getJobId() + ") has been pending for " + logger.warn("Task (job-" + entry.getValue().getJobId() + ") has been pending for " + entry.getValue().millisSinceLastJobHeartbeat() / 1000 + " seconds"); } } @@ -110,7 +108,7 @@ public class AsyncJobMonitor extends ManagerBase { public void registerActiveTask(long runNumber, long jobId) { synchronized (this) { - s_logger.info("Add job-" + jobId + " into job monitoring"); + logger.info("Add job-" + jobId + " into job monitoring"); assert (_activeTasks.get(runNumber) == null); @@ -130,7 +128,7 @@ public class AsyncJobMonitor extends ManagerBase { ActiveTaskRecord record = _activeTasks.get(runNumber); assert (record != null); if (record != null) { - s_logger.info("Remove job-" + record.getJobId() + " from job monitoring"); + logger.info("Remove job-" + record.getJobId() + " from job monitoring"); if (record.isPoolThread()) _activePoolThreads.decrementAndGet(); @@ -148,7 +146,7 @@ public class AsyncJobMonitor extends ManagerBase { while (it.hasNext()) { Map.Entry entry = it.next(); if (entry.getValue().getJobId() == jobId) { - s_logger.info("Remove Job-" + entry.getValue().getJobId() + " from job monitoring due to job cancelling"); + logger.info("Remove Job-" + entry.getValue().getJobId() + " from job monitoring due to job cancelling"); if (entry.getValue().isPoolThread()) _activePoolThreads.decrementAndGet(); diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java index 2f97991e3e3..3397daa5819 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java +++ 
b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java @@ -22,7 +22,6 @@ import java.util.List; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.dao.SyncQueueDao; import org.apache.cloudstack.framework.jobs.dao.SyncQueueItemDao; @@ -36,7 +35,6 @@ import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManager { - public static final Logger s_logger = Logger.getLogger(SyncQueueManagerImpl.class.getName()); @Inject private SyncQueueDao _syncQueueDao; @@ -70,7 +68,7 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage } }); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } return null; } @@ -84,7 +82,7 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage public SyncQueueItemVO doInTransaction(TransactionStatus status) { SyncQueueVO queueVO = _syncQueueDao.findById(queueId); if(queueVO == null) { - s_logger.error("Sync queue(id: " + queueId + ") does not exist"); + logger.error("Sync queue(id: " + queueId + ") does not exist"); return null; } @@ -109,19 +107,19 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage return itemVO; } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Sync queue (" + queueId + ") is currently empty"); + if (logger.isDebugEnabled()) + logger.debug("Sync queue (" + queueId + ") is currently empty"); } } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("There is a pending process in sync queue(id: " + queueId + ")"); + if (logger.isDebugEnabled()) + logger.debug("There is a pending process in sync queue(id: " + queueId + ")"); } return null; } }); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } 
return null; @@ -169,7 +167,7 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage return resultList; } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } return null; @@ -200,14 +198,14 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage } }); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } } @Override @DB public void returnItem(final long queueItemId) { - s_logger.info("Returning queue item " + queueItemId + " back to queue for second try in case of DB deadlock"); + logger.info("Returning queue item " + queueItemId + " back to queue for second try in case of DB deadlock"); try { Transaction.execute(new TransactionCallbackNoReturn() { @Override @@ -228,7 +226,7 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage } }); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } } @@ -247,8 +245,8 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage if (nActiveItems < queueVO.getQueueSizeLimit()) return true; - if (s_logger.isDebugEnabled()) - s_logger.debug("Queue (queue id, sync type, sync id) - (" + queueVO.getId() + if (logger.isDebugEnabled()) + logger.debug("Queue (queue id, sync type, sync id) - (" + queueVO.getId() + "," + queueVO.getSyncObjType() + ", " + queueVO.getSyncObjId() + ") is reaching concurrency limit " + queueVO.getQueueSizeLimit()); return false; @@ -266,8 +264,8 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage public void cleanupActiveQueueItems(Long msid, boolean exclusive) { List l = getActiveQueueItems(msid, false); for (SyncQueueItemVO item : l) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Discard left-over queue item: " + item.toString()); + if (logger.isInfoEnabled()) { + 
logger.info("Discard left-over queue item: " + item.toString()); } purgeItem(item.getId()); } diff --git a/framework/jobs/test/org/apache/cloudstack/framework/jobs/AsyncJobTestDispatcher.java b/framework/jobs/test/org/apache/cloudstack/framework/jobs/AsyncJobTestDispatcher.java index eb30a804978..604eae74afc 100644 --- a/framework/jobs/test/org/apache/cloudstack/framework/jobs/AsyncJobTestDispatcher.java +++ b/framework/jobs/test/org/apache/cloudstack/framework/jobs/AsyncJobTestDispatcher.java @@ -20,15 +20,12 @@ import java.util.Random; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.jobs.JobInfo.Status; import com.cloud.utils.component.AdapterBase; public class AsyncJobTestDispatcher extends AdapterBase implements AsyncJobDispatcher { - private static final Logger s_logger = - Logger.getLogger(AsyncJobTestDispatcher.class); @Inject private AsyncJobManager _asyncJobMgr; @@ -45,14 +42,14 @@ public class AsyncJobTestDispatcher extends AdapterBase implements AsyncJobDispa public void runJob(final AsyncJob job) { _testDashboard.increaseConcurrency(); - s_logger.info("Execute job " + job.getId() + ", current concurrency " + _testDashboard.getConcurrencyCount()); + logger.info("Execute job " + job.getId() + ", current concurrency " + _testDashboard.getConcurrencyCount()); int interval = 3000; try { Thread.sleep(interval); } catch (InterruptedException e) { - s_logger.debug("[ignored] ."); + logger.debug("[ignored] ."); } _asyncJobMgr.completeAsyncJob(job.getId(), Status.SUCCEEDED, 0, null); diff --git a/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java b/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java index 374c080413f..ec1ead7516c 100644 --- a/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java +++ 
b/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java @@ -32,7 +32,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.Ternary; @@ -43,7 +42,6 @@ import com.cloud.utils.security.CertificateHelper; @Component @Local(value = KeystoreManager.class) public class KeystoreManagerImpl extends ManagerBase implements KeystoreManager { - private static final Logger s_logger = Logger.getLogger(KeystoreManagerImpl.class); @Inject private KeystoreDao _ksDao; @@ -51,7 +49,7 @@ public class KeystoreManagerImpl extends ManagerBase implements KeystoreManager @Override public boolean validateCertificate(String certificate, String key, String domainSuffix) { if (certificate == null || certificate.isEmpty() || key == null || key.isEmpty() || domainSuffix == null || domainSuffix.isEmpty()) { - s_logger.error("Invalid parameter found in (certificate, key, domainSuffix) tuple for domain: " + domainSuffix); + logger.error("Invalid parameter found in (certificate, key, domainSuffix) tuple for domain: " + domainSuffix); return false; } @@ -62,9 +60,9 @@ public class KeystoreManagerImpl extends ManagerBase implements KeystoreManager if (ks != null) return true; - s_logger.error("Unabled to construct keystore for domain: " + domainSuffix); + logger.error("Unable to construct keystore for domain: " + domainSuffix); } catch (Exception e) { - s_logger.error("Certificate validation failed due to exception for domain: " + domainSuffix, e); + logger.error("Certificate validation failed due to exception for domain: " + domainSuffix, e); } return false; } @@ -110,15 +108,15 @@ public class KeystoreManagerImpl extends ManagerBase implements KeystoreManager try { return CertificateHelper.buildAndSaveKeystore(certs, storePassword); } catch (KeyStoreException e) { - s_logger.warn("Unable to build keystore 
for " + name + " due to KeyStoreException"); + logger.warn("Unable to build keystore for " + name + " due to KeyStoreException"); } catch (CertificateException e) { - s_logger.warn("Unable to build keystore for " + name + " due to CertificateException"); + logger.warn("Unable to build keystore for " + name + " due to CertificateException"); } catch (NoSuchAlgorithmException e) { - s_logger.warn("Unable to build keystore for " + name + " due to NoSuchAlgorithmException"); + logger.warn("Unable to build keystore for " + name + " due to NoSuchAlgorithmException"); } catch (InvalidKeySpecException e) { - s_logger.warn("Unable to build keystore for " + name + " due to InvalidKeySpecException"); + logger.warn("Unable to build keystore for " + name + " due to InvalidKeySpecException"); } catch (IOException e) { - s_logger.warn("Unable to build keystore for " + name + " due to IOException"); + logger.warn("Unable to build keystore for " + name + " due to IOException"); } return null; } diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java index 79e35f11d0c..100431cd409 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java @@ -22,17 +22,12 @@ import java.util.List; import javax.inject.Inject; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.cloud.utils.component.ComponentLifecycleBase; import com.cloud.utils.component.Named; import com.cloud.utils.component.Registry; public class DumpRegistry extends ComponentLifecycleBase { - private static final Logger log = LoggerFactory.getLogger(DumpRegistry.class); - List> registries; public List> getRegistries() { @@ -55,10 +50,8 @@ public class DumpRegistry 
extends ComponentLifecycleBase { buffer.append(getName(o)); } - - log.info("Registry [{}] contains [{}]", registry.getName(), buffer); + logger.info(String.format("Registry [%s] contains [%s]", registry.getName(), buffer)); } - return super.start(); } diff --git a/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java b/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java index 4383b4573f9..edc51b66df4 100644 --- a/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java +++ b/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java @@ -26,7 +26,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; @@ -43,7 +42,6 @@ import com.cloud.utils.component.PluggableService; @Local(value = APIChecker.class) public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIChecker { - protected static final Logger s_logger = Logger.getLogger(StaticRoleBasedAPIAccessChecker.class); Set commandPropertyFiles = new HashSet(); Set commandsPropertiesOverrides = new HashSet(); @@ -118,7 +116,7 @@ public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIC commandsPropertiesRoleBasedApisMap.get(roleType).add(apiName); } } catch (NumberFormatException nfe) { - s_logger.info("Malformed key=value pair for entry: " + entry.toString()); + logger.info("Malformed key=value pair for entry: " + entry.toString()); } } } diff --git a/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index 9aad5f49972..8eef201d22c 100644 --- 
a/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; -import org.apache.log4j.Logger; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; @@ -58,7 +57,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = AffinityGroupProcessor.class) public class ExplicitDedicationProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { - private static final Logger s_logger = Logger.getLogger(ExplicitDedicationProcessor.class); @Inject protected UserVmDao _vmDao; @Inject @@ -98,8 +96,8 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { if (vmGroupMapping != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing affinity group " + vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM Id: " + vm.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Processing affinity group " + vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM Id: " + vm.getId()); } long affinityGroupId = vmGroupMapping.getAffinityGroupId(); @@ -236,13 +234,13 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement avoid = updateAvoidList(resourceList, avoid, dc); } else { avoid.addDataCenter(dc.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("No dedicated resources available for this domain or account under this group"); + if (logger.isDebugEnabled()) { + logger.debug("No dedicated resources available for this domain or account under this group"); } } - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("ExplicitDedicationProcessor returns Avoid List as: Deploy avoids pods: " + avoid.getPodsToAvoid() + ", clusters: " + + if (logger.isDebugEnabled()) { + logger.debug("ExplicitDedicationProcessor returns Avoid List as: Deploy avoids pods: " + avoid.getPodsToAvoid() + ", clusters: " + avoid.getClustersToAvoid() + ", hosts: " + avoid.getHostsToAvoid()); } } @@ -409,8 +407,8 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement if (group != null) { List dedicatedResources = _dedicatedDao.listByAffinityGroupId(group.getId()); if (!dedicatedResources.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing the dedicated resources under group: " + group); + if (logger.isDebugEnabled()) { + logger.debug("Releasing the dedicated resources under group: " + group); } Transaction.execute(new TransactionCallbackNoReturn() { @@ -427,8 +425,8 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement } }); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No dedicated resources to releease under group: " + group); + if (logger.isDebugEnabled()) { + logger.debug("No dedicated resources to releease under group: " + group); } } } diff --git a/plugins/affinity-group-processors/host-anti-affinity/src/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java b/plugins/affinity-group-processors/host-anti-affinity/src/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java index 0504dc63e0d..35039fd2dc5 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/src/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-anti-affinity/src/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import 
org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -47,7 +46,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = AffinityGroupProcessor.class) public class HostAntiAffinityProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { - private static final Logger s_logger = Logger.getLogger(HostAntiAffinityProcessor.class); @Inject protected UserVmDao _vmDao; @Inject @@ -72,8 +70,8 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements if (vmGroupMapping != null) { AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); } List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); @@ -84,15 +82,15 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements if (groupVM != null && !groupVM.isRemoved()) { if (groupVM.getHostId() != null) { avoid.addHost(groupVM.getHostId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); + if (logger.isDebugEnabled()) { + logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); } } else if (VirtualMachine.State.Stopped.equals(groupVM.getState()) && groupVM.getLastHostId() != null) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { avoid.addHost(groupVM.getLastHostId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + + if (logger.isDebugEnabled()) { + 
logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host, in Stopped state but has reserved capacity"); } } @@ -132,8 +130,8 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements for (Long groupVMId : groupVMIds) { VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId); if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + + if (logger.isDebugEnabled()) { + logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + " reserved on the same host " + plannedHostId); } return false; diff --git a/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java index cb691a9c8ed..5c6682a09b2 100644 --- a/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java +++ b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java @@ -27,7 +27,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.google.gson.annotations.SerializedName; @@ -55,7 +54,6 @@ import com.cloud.utils.component.PluggableService; @Component @Local(value = ApiDiscoveryService.class) public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements ApiDiscoveryService { - private static final Logger s_logger = Logger.getLogger(ApiDiscoveryServiceImpl.class); List _apiAccessCheckers = null; List _services = null; @@ -72,13 +70,13 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A 
s_apiNameDiscoveryResponseMap = new HashMap(); Set> cmdClasses = new HashSet>(); for (PluggableService service : _services) { - s_logger.debug(String.format("getting api commands of service: %s", service.getClass().getName())); + logger.debug(String.format("getting api commands of service: %s", service.getClass().getName())); cmdClasses.addAll(service.getCommands()); } cmdClasses.addAll(this.getCommands()); cacheResponseMap(cmdClasses); long endTime = System.nanoTime(); - s_logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); + logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); } return true; @@ -97,8 +95,8 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A } String apiName = apiCmdAnnotation.name(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Found api: " + apiName); + if (logger.isTraceEnabled()) { + logger.trace("Found api: " + apiName); } ApiDiscoveryResponse response = getCmdRequestMap(cmdClass, apiCmdAnnotation); @@ -227,7 +225,7 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A try { apiChecker.checkAccess(user, name); } catch (Exception ex) { - s_logger.debug("API discovery access check failed for " + name + " with " + ex.getMessage()); + logger.debug("API discovery access check failed for " + name + " with " + ex.getMessage()); return null; } } diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java index b46dc15b7e4..55b3b28e95a 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java @@ -27,7 +27,6 @@ import 
javax.naming.ConfigurationException; import net.sf.ehcache.Cache; import net.sf.ehcache.CacheManager; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.acl.APIChecker; @@ -47,7 +46,6 @@ import com.cloud.utils.component.AdapterBase; @Component @Local(value = APIChecker.class) public class ApiRateLimitServiceImpl extends AdapterBase implements APIChecker, ApiRateLimitService { - private static final Logger s_logger = Logger.getLogger(ApiRateLimitServiceImpl.class); /** * True if api rate limiting is enabled @@ -100,7 +98,7 @@ public class ApiRateLimitServiceImpl extends AdapterBase implements APIChecker, CacheManager cm = CacheManager.create(); Cache cache = new Cache("api-limit-cache", maxElements, false, false, timeToLive, timeToLive); cm.addCache(cache); - s_logger.info("Limit Cache created with timeToLive=" + timeToLive + ", maxAllowed=" + maxAllowed + ", maxElements=" + maxElements); + logger.info("Limit Cache created with timeToLive=" + timeToLive + ", maxAllowed=" + maxAllowed + ", maxElements=" + maxElements); cacheStore.setCache(cache); _store = cacheStore; @@ -165,13 +163,13 @@ public class ApiRateLimitServiceImpl extends AdapterBase implements APIChecker, int current = entry.incrementAndGet(); if (current <= maxAllowed) { - s_logger.trace("account (" + account.getAccountId() + "," + account.getAccountName() + ") has current count = " + current); + logger.trace("account (" + account.getAccountId() + "," + account.getAccountName() + ") has current count = " + current); return true; } else { long expireAfter = entry.getExpireDuration(); // for this exception, we can just show the same message to user and admin users. 
String msg = "The given user has reached his/her account api limit, please retry after " + expireAfter + " ms."; - s_logger.warn(msg); + logger.warn(msg); throw new RequestLimitException(msg); } } diff --git a/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java index 9500cac7a75..6970965bfc2 100644 --- a/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -26,7 +26,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.exception.InsufficientServerCapacityException; @@ -44,7 +43,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = DeploymentPlanner.class) public class ImplicitDedicationPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { - private static final Logger s_logger = Logger.getLogger(ImplicitDedicationPlanner.class); @Inject private ServiceOfferingDao serviceOfferingDao; @@ -159,12 +157,12 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy for (VMInstanceVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it is " + "running instances of another account"); + logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it is " + "running instances of another account"); suitable = false; break; } else { if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { - s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it " + + logger.info("Host " + vm.getHostId() + " found to be unsuitable 
for implicit dedication as it " + "is running instances of this account which haven't been created using implicit dedication."); suitable = false; break; @@ -180,11 +178,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy return false; for (VMInstanceVO vm : allVmsOnHost) { if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { - s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + " than implicit."); + logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + " than implicit."); createdByImplicitStrict = false; break; } else if (isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId())) { - s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" + " in preferred mode."); + logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" + " in preferred mode."); createdByImplicitStrict = false; break; } @@ -196,7 +194,7 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy boolean implicitPlannerUsed = false; ServiceOfferingVO offering = serviceOfferingDao.findByIdIncludingRemoved(offeringId); if (offering == null) { - s_logger.error("Couldn't retrieve the offering by the given id : " + offeringId); + logger.error("Couldn't retrieve the offering by the given id : " + offeringId); } else { String plannerName = offering.getDeploymentPlanner(); if (plannerName == null) { diff --git a/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java b/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java index df6531e8abd..6c747151bf7 100644 --- a/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java +++ 
b/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java @@ -22,7 +22,6 @@ import java.util.Map; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachineProfile; @@ -30,7 +29,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = DeploymentPlanner.class) public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { - private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class); /** * This method should reorder the given list of Cluster Ids by applying any necessary heuristic @@ -64,14 +62,14 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo private List reorderClustersByPods(List clusterIds, List podIds) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Reordering cluster list as per pods ordered by user concentration"); + if (logger.isDebugEnabled()) { + logger.debug("Reordering cluster list as per pods ordered by user concentration"); } Map> podClusterMap = _clusterDao.getPodClusterIdMap(clusterIds); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Pod To cluster Map is: " + podClusterMap); + if (logger.isTraceEnabled()) { + logger.trace("Pod To cluster Map is: " + podClusterMap); } List reorderedClusters = new ArrayList(); @@ -90,22 +88,22 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo } reorderedClusters.addAll(clusterIds); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reordered cluster list: " + reorderedClusters); + if (logger.isTraceEnabled()) { + logger.trace("Reordered cluster list: " + reorderedClusters); } return reorderedClusters; } protected List listPodsByUserConcentration(long zoneId, long accountId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying UserConcentratedPod heuristic for account: " + accountId); + if (logger.isDebugEnabled()) { + logger.debug("Applying 
UserConcentratedPod heuristic for account: " + accountId); } List prioritizedPods = _vmDao.listPodIdsHavingVmsforAccount(zoneId, accountId); - if (s_logger.isTraceEnabled()) { - s_logger.trace("List of pods to be considered, after applying UserConcentratedPod heuristic: " + prioritizedPods); + if (logger.isTraceEnabled()) { + logger.trace("List of pods to be considered, after applying UserConcentratedPod heuristic: " + prioritizedPods); } return prioritizedPods; diff --git a/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java b/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java index 5df4d13756d..2b1e7c7e93d 100644 --- a/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java +++ b/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java @@ -26,7 +26,6 @@ import java.util.TreeMap; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.utils.NumbersUtil; @@ -36,7 +35,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = DeploymentPlanner.class) public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { - private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class); /** * This method should reorder the given list of Cluster Ids by applying any necessary heuristic @@ -99,8 +97,8 @@ public class UserDispersingPlanner extends FirstFitPlanner implements Deployment } protected Pair, Map> listClustersByUserDispersion(long id, boolean isZone, long accountId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying Userdispersion heuristic to clusters for account: " + accountId); + if (logger.isDebugEnabled()) { + logger.debug("Applying Userdispersion heuristic to clusters for account: " + accountId); } Pair, Map> clusterIdsVmCountInfo; if (isZone) 
{ @@ -108,19 +106,19 @@ public class UserDispersingPlanner extends FirstFitPlanner implements Deployment } else { clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInPodByVmCount(id, accountId); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("List of clusters in ascending order of number of VMs: " + clusterIdsVmCountInfo.first()); + if (logger.isTraceEnabled()) { + logger.trace("List of clusters in ascending order of number of VMs: " + clusterIdsVmCountInfo.first()); } return clusterIdsVmCountInfo; } protected Pair, Map> listPodsByUserDispersion(long dataCenterId, long accountId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying Userdispersion heuristic to pods for account: " + accountId); + if (logger.isDebugEnabled()) { + logger.debug("Applying Userdispersion heuristic to pods for account: " + accountId); } Pair, Map> podIdsVmCountInfo = _vmInstanceDao.listPodIdsInZoneByVmCount(dataCenterId, accountId); - if (s_logger.isTraceEnabled()) { - s_logger.trace("List of pods in ascending order of number of VMs: " + podIdsVmCountInfo.first()); + if (logger.isTraceEnabled()) { + logger.trace("List of pods in ascending order of number of VMs: " + podIdsVmCountInfo.first()); } return podIdsVmCountInfo; @@ -132,25 +130,25 @@ public class UserDispersingPlanner extends FirstFitPlanner implements Deployment Map capacityMap = capacityInfo.second(); Map vmCountMap = vmCountInfo.second(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Capacity Id list: " + capacityOrderedIds + " , capacityMap:" + capacityMap); + if (logger.isTraceEnabled()) { + logger.trace("Capacity Id list: " + capacityOrderedIds + " , capacityMap:" + capacityMap); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Vm Count Id list: " + vmCountOrderedIds + " , vmCountMap:" + vmCountMap); + if (logger.isTraceEnabled()) { + logger.trace("Vm Count Id list: " + vmCountOrderedIds + " , vmCountMap:" + vmCountMap); } List idsReorderedByWeights = new ArrayList(); float capacityWeight = 
(1.0f - _userDispersionWeight); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying userDispersionWeight: " + _userDispersionWeight); + if (logger.isDebugEnabled()) { + logger.debug("Applying userDispersionWeight: " + _userDispersionWeight); } //normalize the vmCountMap LinkedHashMap normalisedVmCountIdMap = new LinkedHashMap(); Long totalVmsOfAccount = _vmInstanceDao.countRunningByAccount(accountId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Total VMs for account: " + totalVmsOfAccount); + if (logger.isDebugEnabled()) { + logger.debug("Total VMs for account: " + totalVmsOfAccount); } for (Long id : vmCountOrderedIds) { Double normalisedCount = vmCountMap.get(id) / totalVmsOfAccount; @@ -179,8 +177,8 @@ public class UserDispersingPlanner extends FirstFitPlanner implements Deployment idsReorderedByWeights.addAll(idList); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reordered Id list: " + idsReorderedByWeights); + if (logger.isTraceEnabled()) { + logger.trace("Reordered Id list: " + idsReorderedByWeights); } return idsReorderedByWeights; diff --git a/plugins/event-bus/inmemory/src/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java b/plugins/event-bus/inmemory/src/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java index fd2f9f357f3..ebed8074e4c 100644 --- a/plugins/event-bus/inmemory/src/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java +++ b/plugins/event-bus/inmemory/src/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java @@ -26,7 +26,6 @@ import java.util.concurrent.ConcurrentHashMap; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.events.Event; import org.apache.cloudstack.framework.events.EventBus; @@ -40,7 +39,6 @@ import com.cloud.utils.component.ManagerBase; @Local(value = EventBus.class) public class InMemoryEventBus extends ManagerBase implements EventBus { - private static final Logger s_logger = 
Logger.getLogger(InMemoryEventBus.class); private final static Map> subscribers; diff --git a/plugins/event-bus/kafka/src/org/apache/cloudstack/mom/kafka/KafkaEventBus.java b/plugins/event-bus/kafka/src/org/apache/cloudstack/mom/kafka/KafkaEventBus.java index 6d943dfaba3..a981645ee86 100644 --- a/plugins/event-bus/kafka/src/org/apache/cloudstack/mom/kafka/KafkaEventBus.java +++ b/plugins/event-bus/kafka/src/org/apache/cloudstack/mom/kafka/KafkaEventBus.java @@ -28,7 +28,6 @@ import java.util.Properties; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.events.Event; import org.apache.cloudstack.framework.events.EventBus; @@ -52,7 +51,6 @@ public class KafkaEventBus extends ManagerBase implements EventBus { private String _topic = null; private Producer _producer; - private static final Logger s_logger = Logger.getLogger(KafkaEventBus.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { diff --git a/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java b/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java index e53d2e9ad88..d69147bdbe0 100644 --- a/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java +++ b/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java @@ -30,7 +30,6 @@ import java.util.concurrent.Executors; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.rabbitmq.client.AMQP; import com.rabbitmq.client.AlreadyClosedException; @@ -93,7 +92,6 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { private ExecutorService executorService; private static DisconnectHandler disconnectHandler; - private static final Logger s_logger = Logger.getLogger(RabbitMQEventBus.class); @Override public boolean 
configure(String name, Map params) throws ConfigurationException { @@ -235,9 +233,9 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { s_subscribers.put(queueName, queueDetails); } catch (AlreadyClosedException closedException) { - s_logger.warn("Connection to AMQP service is lost. Subscription:" + queueName + " will be active after reconnection"); + logger.warn("Connection to AMQP service is lost. Subscription:" + queueName + " will be active after reconnection"); } catch (ConnectException connectException) { - s_logger.warn("Connection to AMQP service is lost. Subscription:" + queueName + " will be active after reconnection"); + logger.warn("Connection to AMQP service is lost. Subscription:" + queueName + " will be active after reconnection"); } catch (Exception e) { throw new EventBusException("Failed to subscribe to event due to " + e.getMessage()); } @@ -357,7 +355,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { try { return createConnection(); } catch (Exception e) { - s_logger.error("Failed to create a connection to AMQP server due to " + e.getMessage()); + logger.error("Failed to create a connection to AMQP server due to " + e.getMessage()); throw e; } } else { @@ -397,7 +395,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { s_connection.close(); } } catch (Exception e) { - s_logger.warn("Failed to close connection to AMQP server due to " + e.getMessage()); + logger.warn("Failed to close connection to AMQP server due to " + e.getMessage()); } s_connection = null; } @@ -409,7 +407,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { try { s_connection.abort(); } catch (Exception e) { - s_logger.warn("Failed to abort connection due to " + e.getMessage()); + logger.warn("Failed to abort connection due to " + e.getMessage()); } s_connection = null; } @@ -426,7 +424,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { try { return 
connection.createChannel(); } catch (java.io.IOException exception) { - s_logger.warn("Failed to create a channel due to " + exception.getMessage()); + logger.warn("Failed to create a channel due to " + exception.getMessage()); throw exception; } } @@ -435,7 +433,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { try { channel.exchangeDeclare(exchangeName, "topic", true); } catch (java.io.IOException exception) { - s_logger.error("Failed to create exchange" + exchangeName + " on RabbitMQ server"); + logger.error("Failed to create exchange" + exchangeName + " on RabbitMQ server"); throw exception; } } @@ -445,7 +443,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { byte[] messageBodyBytes = eventDescription.getBytes(); channel.basicPublish(exchangeName, routingKey, MessageProperties.PERSISTENT_TEXT_PLAIN, messageBodyBytes); } catch (Exception e) { - s_logger.error("Failed to publish event " + routingKey + " on exchange " + exchangeName + " of message broker due to " + e.getMessage()); + logger.error("Failed to publish event " + routingKey + " on exchange " + exchangeName + " of message broker due to " + e.getMessage()); throw e; } } @@ -498,7 +496,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { channel.queueDelete(queueName); channel.abort(); } catch (IOException ioe) { - s_logger.warn("Failed to delete queue: " + queueName + " on AMQP server due to " + ioe.getMessage()); + logger.warn("Failed to delete queue: " + queueName + " on AMQP server due to " + ioe.getMessage()); } } } @@ -521,7 +519,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { } abortConnection(); // disconnected to AMQP server, so abort the connection and channels - s_logger.warn("Connection has been shutdown by AMQP server. Attempting to reconnect."); + logger.warn("Connection has been shutdown by AMQP server. 
Attempting to reconnect."); // initiate re-connect process ReconnectionTask reconnect = new ReconnectionTask(); @@ -599,7 +597,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { s_subscribers.put(subscriberId, subscriberDetails); } } catch (Exception e) { - s_logger.warn("Failed to recreate queues and binding for the subscribers due to " + e.getMessage()); + logger.warn("Failed to recreate queues and binding for the subscribers due to " + e.getMessage()); } } return; diff --git a/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java b/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java index 510e6c6b260..15014af5b55 100644 --- a/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java +++ b/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java @@ -35,7 +35,6 @@ import netapp.manage.NaElement; import netapp.manage.NaException; import netapp.manage.NaServer; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.commands.netapp.AssociateLunCmd; @@ -68,7 +67,6 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { roundrobin, leastfull } - public static final Logger s_logger = Logger.getLogger(NetappManagerImpl.class.getName()); @Inject public VolumeDao _volumeDao; @Inject @@ -85,8 +83,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { @Override public void createPool(String poolName, String algorithm) throws InvalidParameterValueException { - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> createPool "); + if (logger.isDebugEnabled()) + logger.debug("Request --> createPool "); PoolVO pool = null; validAlgorithm(algorithm); @@ -94,8 +92,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { pool = new PoolVO(poolName, algorithm); _poolDao.persist(pool); - if (s_logger.isDebugEnabled()) - s_logger.debug("Response --> 
createPool:success"); + if (logger.isDebugEnabled()) + logger.debug("Response --> createPool:success"); } catch (CloudRuntimeException cre) { pool = _poolDao.findPool(poolName); @@ -174,8 +172,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { @Override public void deletePool(String poolName) throws InvalidParameterValueException, ResourceInUseException { - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> deletePool "); + if (logger.isDebugEnabled()) + logger.debug("Request --> deletePool "); PoolVO pool = _poolDao.findPool(poolName); if (pool == null) { @@ -186,8 +184,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { if (volCount == 0) { _poolDao.remove(pool.getId()); - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> deletePool: Success "); + if (logger.isDebugEnabled()) + logger.debug("Request --> deletePool: Success "); } else { throw new ResourceInUseException("Cannot delete non-empty pool"); @@ -218,14 +216,14 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { volume = _volumeDao.findVolume(ipAddress, aggrName, volName); if (volume == null) { - s_logger.warn("The volume does not exist in our system"); + logger.warn("The volume does not exist in our system"); throw new InvalidParameterValueException("The given tuple:" + ipAddress + "," + aggrName + "," + volName + " doesn't exist in our system"); } List lunsOnVol = _lunDao.listLunsByVolId(volume.getId()); if (lunsOnVol != null && lunsOnVol.size() > 0) { - s_logger.warn("There are luns on the volume"); + logger.warn("There are luns on the volume"); throw new ResourceInUseException("There are luns on the volume"); } @@ -258,12 +256,12 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { txn.commit(); } catch (UnknownHostException uhe) { - s_logger.warn("Unable to delete volume on filer ", uhe); + logger.warn("Unable to delete volume on filer ", uhe); throw new 
ServerException("Unable to delete volume on filer", uhe); } catch (NaAPIFailedException naf) { - s_logger.warn("Unable to delete volume on filer ", naf); + logger.warn("Unable to delete volume on filer ", naf); if (naf.getErrno() == 13040) { - s_logger.info("Deleting the volume: " + volName); + logger.info("Deleting the volume: " + volName); _volumeDao.remove(volume.getId()); txn.commit(); } @@ -271,11 +269,11 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { throw new ServerException("Unable to delete volume on filer", naf); } catch (NaException nae) { txn.rollback(); - s_logger.warn("Unable to delete volume on filer ", nae); + logger.warn("Unable to delete volume on filer ", nae); throw new ServerException("Unable to delete volume on filer", nae); } catch (IOException ioe) { txn.rollback(); - s_logger.warn("Unable to delete volume on filer ", ioe); + logger.warn("Unable to delete volume on filer ", ioe); throw new ServerException("Unable to delete volume on filer", ioe); } finally { if (pool != null) { @@ -306,8 +304,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { public void createVolumeOnFiler(String ipAddress, String aggName, String poolName, String volName, String volSize, String snapshotPolicy, Integer snapshotReservation, String username, String password) throws UnknownHostException, ServerException, InvalidParameterValueException { - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> createVolume " + "serverIp:" + ipAddress); + if (logger.isDebugEnabled()) + logger.debug("Request --> createVolume " + "serverIp:" + ipAddress); boolean snapPolicy = false; boolean snapshotRes = false; @@ -394,7 +392,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { } pool = _poolDao.acquireInLockTable(pool.getId()); if (pool == null) { - s_logger.warn("Failed to acquire lock on pool " + poolName); + logger.warn("Failed to acquire lock on pool " + poolName); throw new 
ConcurrentModificationException("Failed to acquire lock on pool " + poolName); } volume = new NetappVolumeVO(ipAddress, aggName, pool.getId(), volName, volSize, "", 0, username, password, 0, pool.getName()); @@ -419,34 +417,34 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { txn.commit(); } catch (NaException nae) { //zapi call failed, log and throw e - s_logger.warn("Failed to create volume on the netapp filer:", nae); + logger.warn("Failed to create volume on the netapp filer:", nae); txn.rollback(); if (volumeCreated) { try { deleteRogueVolume(volName, s);//deletes created volume on filer } catch (NaException e) { - s_logger.warn("Failed to cleanup created volume whilst rolling back on the netapp filer:", e); + logger.warn("Failed to cleanup created volume whilst rolling back on the netapp filer:", e); throw new ServerException("Unable to create volume via cloudtools." + "Failed to cleanup created volume on netapp filer whilst rolling back on the cloud db:", e); } catch (IOException e) { - s_logger.warn("Failed to cleanup created volume whilst rolling back on the netapp filer:", e); + logger.warn("Failed to cleanup created volume whilst rolling back on the netapp filer:", e); throw new ServerException("Unable to create volume via cloudtools." 
+ "Failed to cleanup created volume on netapp filer whilst rolling back on the cloud db:", e); } } throw new ServerException("Unable to create volume", nae); } catch (IOException ioe) { - s_logger.warn("Failed to create volume on the netapp filer:", ioe); + logger.warn("Failed to create volume on the netapp filer:", ioe); txn.rollback(); if (volumeCreated) { try { deleteRogueVolume(volName, s);//deletes created volume on filer } catch (NaException e) { - s_logger.warn("Failed to cleanup created volume whilst rolling back on the netapp filer:", e); + logger.warn("Failed to cleanup created volume whilst rolling back on the netapp filer:", e); throw new ServerException("Unable to create volume via cloudtools." + "Failed to cleanup created volume on netapp filer whilst rolling back on the cloud db:", e); } catch (IOException e) { - s_logger.warn("Failed to cleanup created volume whilst rolling back on the netapp filer:", e); + logger.warn("Failed to cleanup created volume whilst rolling back on the netapp filer:", e); throw new ServerException("Unable to create volume via cloudtools." 
+ "Failed to cleanup created volume on netapp filer whilst rolling back on the cloud db:", e); } @@ -497,7 +495,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { vol.setSnapshotPolicy(snapScheduleOnFiler); } catch (ServerException e) { - s_logger.warn("Error trying to get snapshot schedule for volume" + vol.getVolumeName()); + logger.warn("Error trying to get snapshot schedule for volume" + vol.getVolumeName()); } } return vols; @@ -538,10 +536,10 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { .append(whichMinutes); return sB.toString(); } catch (NaException nae) { - s_logger.warn("Failed to get volume size ", nae); + logger.warn("Failed to get volume size ", nae); throw new ServerException("Failed to get volume size", nae); } catch (IOException ioe) { - s_logger.warn("Failed to get volume size ", ioe); + logger.warn("Failed to get volume size ", ioe); throw new ServerException("Failed to get volume size", ioe); } finally { if (s != null) @@ -597,10 +595,10 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { } } catch (NaException nae) { - s_logger.warn("Failed to get volume size ", nae); + logger.warn("Failed to get volume size ", nae); throw new ServerException("Failed to get volume size", nae); } catch (IOException ioe) { - s_logger.warn("Failed to get volume size ", ioe); + logger.warn("Failed to get volume size ", ioe); throw new ServerException("Failed to get volume size", ioe); } finally { if (s != null) @@ -646,7 +644,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { } pool = _poolDao.acquireInLockTable(pool.getId()); if (pool == null) { - s_logger.warn("Failed to acquire lock on the pool " + poolName); + logger.warn("Failed to acquire lock on the pool " + poolName); return result; } NaServer s = null; @@ -663,8 +661,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { throw new ServerException("Could 
not find a suitable volume to create lun on"); } - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> createLun " + "serverIp:" + selectedVol.getIpAddress()); + if (logger.isDebugEnabled()) + logger.debug("Request --> createLun " + "serverIp:" + selectedVol.getIpAddress()); StringBuilder exportPath = new StringBuilder("/vol/"); exportPath.append(selectedVol.getVolumeName()); @@ -715,7 +713,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { } catch (NaAPIFailedException e) { if (e.getErrno() == 9004) { //igroup already exists hence no error - s_logger.warn("Igroup already exists"); + logger.warn("Igroup already exists"); } } @@ -794,15 +792,15 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { NetappVolumeVO vol = _volumeDao.acquireInLockTable(lun.getVolumeId()); if (vol == null) { - s_logger.warn("Failed to lock volume id= " + lun.getVolumeId()); + logger.warn("Failed to lock volume id= " + lun.getVolumeId()); return; } NaServer s = null; try { s = getServer(vol.getIpAddress(), vol.getUsername(), vol.getPassword()); - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> destroyLun " + ":serverIp:" + vol.getIpAddress()); + if (logger.isDebugEnabled()) + logger.debug("Request --> destroyLun " + ":serverIp:" + vol.getIpAddress()); try { //Unmap lun @@ -812,7 +810,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { s.invokeElem(xi2); } catch (NaAPIFailedException naf) { if (naf.getErrno() == 9016) - s_logger.warn("no map exists excpn 9016 caught in deletelun, continuing with delete"); + logger.warn("no map exists excpn 9016 caught in deletelun, continuing with delete"); } //destroy lun @@ -831,30 +829,30 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { txn.commit(); } catch (UnknownHostException uhe) { txn.rollback(); - s_logger.warn("Failed to delete lun", uhe); + logger.warn("Failed to delete lun", uhe); throw new 
ServerException("Failed to delete lun", uhe); } catch (IOException ioe) { txn.rollback(); - s_logger.warn("Failed to delete lun", ioe); + logger.warn("Failed to delete lun", ioe); throw new ServerException("Failed to delete lun", ioe); } catch (NaAPIFailedException naf) { if (naf.getErrno() == 9017) {//no such group exists excpn - s_logger.warn("no such group exists excpn 9017 caught in deletelun, continuing with delete"); + logger.warn("no such group exists excpn 9017 caught in deletelun, continuing with delete"); _lunDao.remove(lun.getId()); txn.commit(); } else if (naf.getErrno() == 9029) {//LUN maps for this initiator group exist - s_logger.warn("LUN maps for this initiator group exist errno 9029 caught in deletelun, continuing with delete"); + logger.warn("LUN maps for this initiator group exist errno 9029 caught in deletelun, continuing with delete"); _lunDao.remove(lun.getId()); txn.commit(); } else { txn.rollback(); - s_logger.warn("Failed to delete lun", naf); + logger.warn("Failed to delete lun", naf); throw new ServerException("Failed to delete lun", naf); } } catch (NaException nae) { txn.rollback(); - s_logger.warn("Failed to delete lun", nae); + logger.warn("Failed to delete lun", nae); throw new ServerException("Failed to delete lun", nae); } finally { if (vol != null) { @@ -875,8 +873,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { */ @Override public List listLunsOnFiler(String poolName) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> listLunsOnFiler "); + if (logger.isDebugEnabled()) + logger.debug("Request --> listLunsOnFiler "); List luns = new ArrayList(); @@ -886,8 +884,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { luns.addAll(_lunDao.listLunsByVolId(vol.getId())); } - if (s_logger.isDebugEnabled()) - s_logger.debug("Response --> listLunsOnFiler:success"); + if (logger.isDebugEnabled()) + logger.debug("Response --> listLunsOnFiler:success"); return luns; } 
@@ -910,8 +908,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { try { s = getServer(vol.getIpAddress(), vol.getUsername(), vol.getPassword()); - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> disassociateLun " + ":serverIp:" + vol.getIpAddress()); + if (logger.isDebugEnabled()) + logger.debug("Request --> disassociateLun " + ":serverIp:" + vol.getIpAddress()); xi = new NaElement("igroup-remove"); xi.addNewChild("force", "true"); @@ -972,8 +970,8 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { try { s = getServer(vol.getIpAddress(), vol.getUsername(), vol.getPassword()); - if (s_logger.isDebugEnabled()) - s_logger.debug("Request --> associateLun " + ":serverIp:" + vol.getIpAddress()); + if (logger.isDebugEnabled()) + logger.debug("Request --> associateLun " + ":serverIp:" + vol.getIpAddress()); //add iqn to the group xi2 = new NaElement("igroup-add"); @@ -984,19 +982,19 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager { return returnVal; } catch (UnknownHostException uhe) { - s_logger.warn("Unable to associate LUN ", uhe); + logger.warn("Unable to associate LUN ", uhe); throw new ServerException("Unable to associate LUN", uhe); } catch (NaAPIFailedException naf) { if (naf.getErrno() == 9008) { //initiator group already contains node return returnVal; } - s_logger.warn("Unable to associate LUN ", naf); + logger.warn("Unable to associate LUN ", naf); throw new ServerException("Unable to associate LUN", naf); } catch (NaException nae) { - s_logger.warn("Unable to associate LUN ", nae); + logger.warn("Unable to associate LUN ", nae); throw new ServerException("Unable to associate LUN", nae); } catch (IOException ioe) { - s_logger.warn("Unable to associate LUN ", ioe); + logger.warn("Unable to associate LUN ", ioe); throw new ServerException("Unable to associate LUN", ioe); } finally { if (s != null) diff --git 
a/plugins/file-systems/netapp/src/com/cloud/netapp/dao/LunDaoImpl.java b/plugins/file-systems/netapp/src/com/cloud/netapp/dao/LunDaoImpl.java index 60a15b58789..1bb2da89bd9 100644 --- a/plugins/file-systems/netapp/src/com/cloud/netapp/dao/LunDaoImpl.java +++ b/plugins/file-systems/netapp/src/com/cloud/netapp/dao/LunDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.netapp.LunVO; @@ -32,7 +31,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {LunDao.class}) public class LunDaoImpl extends GenericDaoBase implements LunDao { - private static final Logger s_logger = Logger.getLogger(PoolDaoImpl.class); protected final SearchBuilder LunSearch; protected final SearchBuilder LunNameSearch; diff --git a/plugins/file-systems/netapp/src/com/cloud/netapp/dao/PoolDaoImpl.java b/plugins/file-systems/netapp/src/com/cloud/netapp/dao/PoolDaoImpl.java index 4ac76df8f7c..ff2573f1d70 100644 --- a/plugins/file-systems/netapp/src/com/cloud/netapp/dao/PoolDaoImpl.java +++ b/plugins/file-systems/netapp/src/com/cloud/netapp/dao/PoolDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.netapp.PoolVO; @@ -31,7 +30,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {PoolDao.class}) public class PoolDaoImpl extends GenericDaoBase implements PoolDao { - private static final Logger s_logger = Logger.getLogger(PoolDaoImpl.class); protected final SearchBuilder PoolSearch; diff --git a/plugins/file-systems/netapp/src/com/cloud/netapp/dao/VolumeDaoImpl.java b/plugins/file-systems/netapp/src/com/cloud/netapp/dao/VolumeDaoImpl.java index e239b1ec560..ea4fdbd86fb 100644 --- a/plugins/file-systems/netapp/src/com/cloud/netapp/dao/VolumeDaoImpl.java +++ 
b/plugins/file-systems/netapp/src/com/cloud/netapp/dao/VolumeDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.netapp.NetappVolumeVO; @@ -32,7 +31,6 @@ import com.cloud.utils.db.SearchCriteria; @Component(value = "netappVolumeDaoImpl") @Local(value = {VolumeDao.class}) public class VolumeDaoImpl extends GenericDaoBase implements VolumeDao { - private static final Logger s_logger = Logger.getLogger(VolumeDaoImpl.class); protected final SearchBuilder NetappVolumeSearch; protected final SearchBuilder NetappListVolumeSearch; diff --git a/plugins/ha-planners/skip-heurestics/src/com/cloud/deploy/SkipHeuresticsPlanner.java b/plugins/ha-planners/skip-heurestics/src/com/cloud/deploy/SkipHeuresticsPlanner.java index b67d112f825..9ea3d82bdef 100644 --- a/plugins/ha-planners/skip-heurestics/src/com/cloud/deploy/SkipHeuresticsPlanner.java +++ b/plugins/ha-planners/skip-heurestics/src/com/cloud/deploy/SkipHeuresticsPlanner.java @@ -17,7 +17,6 @@ package com.cloud.deploy; import com.cloud.vm.VirtualMachineProfile; -import org.apache.log4j.Logger; import javax.ejb.Local; @@ -27,7 +26,6 @@ import java.util.Map; @Local(value=HAPlanner.class) public class SkipHeuresticsPlanner extends FirstFitPlanner implements HAPlanner { - private static final Logger s_logger = Logger.getLogger(SkipHeuresticsPlanner.class); /** @@ -39,8 +37,8 @@ public class SkipHeuresticsPlanner extends FirstFitPlanner implements HAPlanner @Override protected void removeClustersCrossingThreshold(List clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile vmProfile, DeploymentPlan plan){ - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deploying vm during HA process, so skipping disable threshold check"); + if (logger.isDebugEnabled()) { + logger.debug("Deploying vm during HA process, so skipping disable threshold check"); } return; } diff --git 
a/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java b/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java index 390a8afd089..1df6458eef3 100644 --- a/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java +++ b/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java @@ -23,7 +23,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.manager.allocator.HostAllocator; @@ -42,7 +41,6 @@ import com.cloud.vm.VirtualMachineProfile; @Component @Local(value = HostAllocator.class) public class RandomAllocator extends AdapterBase implements HostAllocator { - private static final Logger s_logger = Logger.getLogger(RandomAllocator.class); @Inject private HostDao _hostDao; @Inject @@ -69,9 +67,9 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { String hostTag = offering.getHostTag(); if (hostTag != null) { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having host tag:" + hostTag); + logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having host tag:" + hostTag); } else { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); + logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); } // list all computing hosts, regardless of whether they support routing...it's random after all @@ -81,7 +79,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { hostsCopy.retainAll(_resourceMgr.listAllUpAndEnabledHosts(type, clusterId, podId, dcId)); } - s_logger.debug("Random Allocator found " + hostsCopy.size() + " hosts"); + logger.debug("Random Allocator found " + 
hostsCopy.size() + " hosts"); if (hostsCopy.size() == 0) { return suitableHosts; } @@ -95,14 +93,14 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { if (!avoid.shouldAvoid(host)) { suitableHosts.add(host); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, " + "skipping this and trying other available hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, " + "skipping this and trying other available hosts"); } } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Random Host Allocator returning " + suitableHosts.size() + " suitable hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Random Host Allocator returning " + suitableHosts.size() + " suitable hosts"); } return suitableHosts; @@ -124,9 +122,9 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { String hostTag = offering.getHostTag(); if (hostTag != null) { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having host tag:" + hostTag); + logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having host tag:" + hostTag); } else { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); + logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); } // list all computing hosts, regardless of whether they support routing...it's random after all @@ -137,7 +135,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { hosts = _resourceMgr.listAllUpAndEnabledHosts(type, clusterId, podId, dcId); } - s_logger.debug("Random Allocator found " + hosts.size() + " hosts"); + logger.debug("Random Allocator found " + hosts.size() + " hosts"); if (hosts.size() == 0) { return suitableHosts; @@ -152,13 
+150,13 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { if (!avoid.shouldAvoid(host)) { suitableHosts.add(host); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); } } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Random Host Allocator returning " + suitableHosts.size() + " suitable hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Random Host Allocator returning " + suitableHosts.size() + " suitable hosts"); } return suitableHosts; } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalDiscoverer.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalDiscoverer.java index 8b4b45348fd..47fa239fc3b 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalDiscoverer.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalDiscoverer.java @@ -34,7 +34,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.api.ApiConstants; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; @@ -63,7 +62,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = Discoverer.class) public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter { - protected static final Logger s_logger = Logger.getLogger(BareMetalDiscoverer.class); @Inject protected VMInstanceDao _vmDao = null; @@ -94,25 +92,25 @@ public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, R if (!url.getScheme().equals("http")) { String msg = "urlString is not http so we're 
not taking care of the discovery for this: " + url; - s_logger.debug(msg); + logger.debug(msg); return null; } if (clusterId == null) { String msg = "must specify cluster Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } if (podId == null) { String msg = "must specify pod Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || (cluster.getHypervisorType() != HypervisorType.BareMetal)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Bare Metal hosts"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for Bare Metal hosts"); return null; } @@ -134,14 +132,14 @@ public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, R + injectScript); } - final Script2 command = new Script2(scriptPath, s_logger); + final Script2 command = new Script2(scriptPath, logger); command.add("ping"); command.add("hostname="+ipmiIp); command.add("usrname="+username); command.add("password="+password, ParamType.PASSWORD); final String result = command.execute(); if (result != null) { - s_logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result)); + logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result)); return null; } @@ -207,11 +205,11 @@ public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, R zone.setDhcpProvider(Network.Provider.ExternalDhcpServer.getName()); _dcDao.update(zone.getId(), zone); - s_logger.debug(String.format("Discover Bare Metal host successfully(ip=%1$s, username=%2$s, password=%3%s," + + logger.debug(String.format("Discover Bare Metal host successfully(ip=%1$s, username=%2$s, password=%3$s," +
"cpuNum=%4$s, cpuCapacity-%5$s, memCapacity=%6$s)", ipmiIp, username, "******", cpuNum, cpuCapacity, memCapacity)); return resources; } catch (Exception e) { - s_logger.warn("Can not set up bare metal agent", e); + logger.warn("Can not set up bare metal agent", e); } return null; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalGuru.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalGuru.java index 8f439cdc1dd..9b46cfd3d76 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalGuru.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalGuru.java @@ -28,7 +28,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.host.dao.HostDao; @@ -43,7 +42,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = HypervisorGuru.class) public class BareMetalGuru extends HypervisorGuruBase implements HypervisorGuru { - private static final Logger s_logger = Logger.getLogger(BareMetalGuru.class); @Inject GuestOSDao _guestOsDao; @Inject diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java index 2f93ec7d819..aa11448750a 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -53,7 +52,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = DeploymentPlanner.class) public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { - private static final 
Logger s_logger = Logger.getLogger(BareMetalPlanner.class); @Inject protected DataCenterDao _dcDao; @Inject @@ -84,7 +82,7 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); Cluster c = _clusterDao.findById(h.getClusterId()); - s_logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId()); + logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId()); return new DeployDestination(dc, pod, c, h); } @@ -116,7 +114,7 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { } if (target == null) { - s_logger.warn("Cannot find host with tag " + hostTag + " use capacity from service offering"); + logger.warn("Cannot find host with tag " + hostTag + " use capacity from service offering"); cpu_requested = offering.getCpu() * offering.getSpeed(); ram_requested = offering.getRamSize() * 1024L * 1024L; } else { @@ -128,7 +126,7 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { if (haVmTag == null) { hosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); } else { - s_logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + + logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + cluster.getDataCenterId()); return null; } @@ -140,7 +138,7 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - s_logger.debug("Find host " + 
h.getId() + " has enough capacity"); + logger.debug("Find host " + h.getId() + " has enough capacity"); DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); return new DeployDestination(dc, pod, cluster, h); @@ -148,7 +146,7 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { } } - s_logger.warn(String.format("Cannot find enough capacity(requested cpu=%1$s memory=%2$s)", cpu_requested, ram_requested)); + logger.warn(String.format("Cannot find enough capacity(requested cpu=%1$s memory=%2$s)", cpu_requested, ram_requested)); return null; } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java index 17dab005874..6d74119c45e 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java @@ -44,7 +44,6 @@ import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; -import org.apache.log4j.Logger; import javax.ejb.Local; import javax.inject.Inject; @@ -53,7 +52,6 @@ import java.util.List; @Local(value = TemplateAdapter.class) public class BareMetalTemplateAdapter extends TemplateAdapterBase implements TemplateAdapter { - private final static Logger s_logger = Logger.getLogger(BareMetalTemplateAdapter.class); @Inject HostDao _hostDao; @Inject @@ -131,7 +129,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem zoneName = "all zones"; } - s_logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + 
zoneName); + logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId()); String eventType = EventTypes.EVENT_TEMPLATE_DELETE; List templateHostVOs = this._tmpltStoreDao.listByTemplate(templateId); @@ -141,7 +139,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem try { lock = _tmpltStoreDao.acquireInLockTable(vo.getId()); if (lock == null) { - s_logger.debug("Failed to acquire lock when deleting templateDataStoreVO with ID: " + vo.getId()); + logger.debug("Failed to acquire lock when deleting templateDataStoreVO with ID: " + vo.getId()); success = false; break; } @@ -173,7 +171,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem _tmpltZoneDao.remove(templateZone.getId()); } - s_logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); + logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); // If there are no more non-destroyed template host entries for this template, delete it if (success && (_tmpltStoreDao.listByTemplate(templateId).size() == 0)) { @@ -183,7 +181,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem try { if (lock == null) { - s_logger.debug("Failed to acquire lock when deleting template with ID: " + templateId); + logger.debug("Failed to acquire lock when deleting template with ID: " + templateId); success = false; } else if (_tmpltDao.remove(templateId)) { // Decrement the number of templates and total secondary storage space used by the account. 
@@ -196,7 +194,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem _tmpltDao.releaseFromLockTable(lock.getId()); } } - s_logger.debug("Removed template: " + template.getName() + " because all of its template host refs were marked as destroyed."); + logger.debug("Removed template: " + template.getName() + " because all of its template host refs were marked as destroyed."); } return success; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManagerImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManagerImpl.java index f826ae91fde..61f421550c2 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManagerImpl.java @@ -33,7 +33,6 @@ import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.api.BaremetalProvisionDoneNotificationCmd; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.AddBaremetalHostCmd; @@ -48,7 +47,6 @@ import com.cloud.vm.VirtualMachine.State; @Local(value = {BaremetalManager.class}) public class BaremetalManagerImpl extends ManagerBase implements BaremetalManager, StateListener { - private static final Logger s_logger = Logger.getLogger(BaremetalManagerImpl.class); @Inject protected HostDao _hostDao; @@ -95,17 +93,17 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage HostVO host = _hostDao.findById(vo.getHostId()); if (host == null) { - s_logger.debug("Skip oldState " + oldState + " to " + "newState " + newState + " transimtion"); + logger.debug("Skip oldState " + oldState + " to " + "newState " + newState + " transimtion"); return true; } _hostDao.loadDetails(host); if (newState == State.Starting) { host.setDetail("vmName", vo.getInstanceName()); - s_logger.debug("Add vmName " + 
host.getDetail("vmName") + " to host " + host.getId() + " details"); + logger.debug("Add vmName " + host.getDetail("vmName") + " to host " + host.getId() + " details"); } else { if (host.getDetail("vmName") != null && host.getDetail("vmName").equalsIgnoreCase(vo.getInstanceName())) { - s_logger.debug("Remove vmName " + host.getDetail("vmName") + " from host " + host.getId() + " details"); + logger.debug("Remove vmName " + host.getDetail("vmName") + " from host " + host.getId() + " details"); host.getDetails().remove("vmName"); } } @@ -152,7 +150,7 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage vm.setState(State.Running); vm.setLastHostId(vm.getHostId()); vmDao.update(vm.getId(), vm); - s_logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", + logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", vm.getId(), vm.getInstanceName(), host.getPrivateMacAddress(), host.getPrivateIpAddress())); } } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java index 5ad48dc7493..136179dcc04 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java @@ -33,7 +33,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.AddBaremetalPxeCmd; import org.apache.cloudstack.api.AddBaremetalPxePingServerCmd; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; @@ -70,7 +69,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = 
BaremetalPxeService.class) public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements BaremetalPxeService { - private static final Logger s_logger = Logger.getLogger(BareMetalPingServiceImpl.class); @Inject ResourceManager _resourceMgr; @Inject @@ -109,19 +107,19 @@ public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements new PreparePxeServerCommand(ip, mac, mask, gateway, dns, tpl, profile.getVirtualMachine().getInstanceName(), dest.getHost().getName()); PreparePxeServerAnswer ans = (PreparePxeServerAnswer)_agentMgr.send(pxeServerId, cmd); if (!ans.getResult()) { - s_logger.warn("Unable tot program PXE server: " + pxeVo.getId() + " because " + ans.getDetails()); + logger.warn("Unable tot program PXE server: " + pxeVo.getId() + " because " + ans.getDetails()); return false; } IpmISetBootDevCommand bootCmd = new IpmISetBootDevCommand(BootDev.pxe); Answer anw = _agentMgr.send(dest.getHost().getId(), bootCmd); if (!anw.getResult()) { - s_logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + anw.getDetails()); + logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + anw.getDetails()); } return anw.getResult(); } catch (Exception e) { - s_logger.warn("Cannot prepare PXE server", e); + logger.warn("Cannot prepare PXE server", e); return false; } } @@ -152,7 +150,7 @@ public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements Answer ans = _agentMgr.send(pxeServerId, cmd); return ans.getResult(); } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); + logger.debug("Prepare for creating baremetal template failed", e); return false; } } @@ -221,7 +219,7 @@ public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements try { uri = new URI(cmd.getUrl()); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new IllegalArgumentException(e.getMessage()); } String ipAddress 
= uri.getHost(); @@ -246,7 +244,7 @@ public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements try { resource.configure("PING PXE resource", params); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new CloudRuntimeException(e.getMessage()); } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalResourceBase.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalResourceBase.java index 5ef861d8139..c2bdc6b55b0 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalResourceBase.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalResourceBase.java @@ -72,7 +72,6 @@ import com.cloud.vm.VirtualMachine.PowerState; import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import javax.ejb.Local; import javax.naming.ConfigurationException; @@ -83,7 +82,6 @@ import java.util.concurrent.TimeUnit; @Local(value = ServerResource.class) public class BareMetalResourceBase extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BareMetalResourceBase.class); protected String _uuid; protected String _zone; protected String _pod; @@ -176,20 +174,20 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource try { ipmiIface = configDao.getValue(Config.BaremetalIpmiLanInterface.key()); } catch (Exception e) { - s_logger.debug(e.getMessage(), e); + logger.debug(e.getMessage(), e); } try { ipmiRetryTimes = Integer.parseInt(configDao.getValue(Config.BaremetalIpmiRetryTimes.key())); } catch (Exception e) { - s_logger.debug(e.getMessage(), e); + logger.debug(e.getMessage(), e); } try { provisionDoneNotificationOn = Boolean.valueOf(configDao.getValue(Config.BaremetalProvisionDoneNotificationEnabled.key())); 
isProvisionDoneNotificationTimeout = Integer.parseInt(configDao.getValue(Config.BaremetalProvisionDoneNotificationTimeout.key())); } catch (Exception e) { - s_logger.debug(e.getMessage(), e); + logger.debug(e.getMessage(), e); } String injectScript = "scripts/util/ipmi.py"; @@ -198,7 +196,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource throw new ConfigurationException("Cannot find ping script " + scriptPath); } String pythonPath = "/usr/bin/python"; - _pingCommand = new Script2(pythonPath, s_logger); + _pingCommand = new Script2(pythonPath, logger); _pingCommand.add(scriptPath); _pingCommand.add("ping"); _pingCommand.add("interface=" + ipmiIface); @@ -206,7 +204,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource _pingCommand.add("usrname=" + _username); _pingCommand.add("password=" + _password, ParamType.PASSWORD); - _setPxeBootCommand = new Script2(pythonPath, s_logger); + _setPxeBootCommand = new Script2(pythonPath, logger); _setPxeBootCommand.add(scriptPath); _setPxeBootCommand.add("boot_dev"); _setPxeBootCommand.add("interface=" + ipmiIface); @@ -215,7 +213,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource _setPxeBootCommand.add("password=" + _password, ParamType.PASSWORD); _setPxeBootCommand.add("dev=pxe"); - _setDiskBootCommand = new Script2(pythonPath, s_logger); + _setDiskBootCommand = new Script2(pythonPath, logger); _setDiskBootCommand.add(scriptPath); _setDiskBootCommand.add("boot_dev"); _setDiskBootCommand.add("interface=" + ipmiIface); @@ -224,7 +222,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource _setDiskBootCommand.add("password=" + _password, ParamType.PASSWORD); _setDiskBootCommand.add("dev=disk"); - _rebootCommand = new Script2(pythonPath, s_logger); + _rebootCommand = new Script2(pythonPath, logger); _rebootCommand.add(scriptPath); _rebootCommand.add("reboot"); _rebootCommand.add("interface=" + 
ipmiIface); @@ -232,7 +230,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource _rebootCommand.add("usrname=" + _username); _rebootCommand.add("password=" + _password, ParamType.PASSWORD); - _getStatusCommand = new Script2(pythonPath, s_logger); + _getStatusCommand = new Script2(pythonPath, logger); _getStatusCommand.add(scriptPath); _getStatusCommand.add("ping"); _getStatusCommand.add("interface=" + ipmiIface); @@ -240,7 +238,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource _getStatusCommand.add("usrname=" + _username); _getStatusCommand.add("password=" + _password, ParamType.PASSWORD); - _powerOnCommand = new Script2(pythonPath, s_logger); + _powerOnCommand = new Script2(pythonPath, logger); _powerOnCommand.add(scriptPath); _powerOnCommand.add("power"); _powerOnCommand.add("interface=" + ipmiIface); @@ -249,7 +247,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource _powerOnCommand.add("password=" + _password, ParamType.PASSWORD); _powerOnCommand.add("action=on"); - _powerOffCommand = new Script2(pythonPath, s_logger); + _powerOffCommand = new Script2(pythonPath, logger); _powerOffCommand.add(scriptPath); _powerOffCommand.add("power"); _powerOffCommand.add("interface=" + ipmiIface); @@ -258,7 +256,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource _powerOffCommand.add("password=" + _password, ParamType.PASSWORD); _powerOffCommand.add("action=soft"); - _forcePowerOffCommand = new Script2(pythonPath, s_logger); + _forcePowerOffCommand = new Script2(pythonPath, logger); _forcePowerOffCommand.add(scriptPath); _forcePowerOffCommand.add("power"); _forcePowerOffCommand.add("interface=" + ipmiIface); @@ -267,7 +265,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource _forcePowerOffCommand.add("password=" + _password, ParamType.PASSWORD); _forcePowerOffCommand.add("action=off"); - _bootOrRebootCommand = 
new Script2(pythonPath, s_logger); + _bootOrRebootCommand = new Script2(pythonPath, logger); _bootOrRebootCommand.add(scriptPath); _bootOrRebootCommand.add("boot_or_reboot"); _bootOrRebootCommand.add("interface=" + ipmiIface); @@ -299,11 +297,11 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource res = cmd.execute(interpreter); } if (res != null && res.startsWith("Error: Unable to establish LAN")) { - s_logger.warn("IPMI script timeout(" + cmd.toString() + "), will retry " + retry + " times"); + logger.warn("IPMI script timeout(" + cmd.toString() + "), will retry " + retry + " times"); try { TimeUnit.SECONDS.sleep(1); } catch (InterruptedException e) { - s_logger.debug("[ignored] interupted while waiting to retry running script."); + logger.debug("[ignored] interupted while waiting to retry running script."); } continue; } else if (res == null) { @@ -313,7 +311,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource } } - s_logger.warn("IPMI Scirpt failed due to " + res + "(" + cmd.toString() + ")"); + logger.warn("IPMI Scirpt failed due to " + res + "(" + cmd.toString() + ")"); return false; } @@ -379,12 +377,12 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource if (!ipmiPing()) { Thread.sleep(1000); if (!ipmiPing()) { - s_logger.warn("Cannot ping ipmi nic " + _ip); + logger.warn("Cannot ping ipmi nic " + _ip); return null; } } } catch (Exception e) { - s_logger.debug("Cannot ping ipmi nic " + _ip, e); + logger.debug("Cannot ping ipmi nic " + _ip, e); return null; } @@ -419,11 +417,11 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource String bootDev = cmd.getBootDev().name(); if (!doScript(bootCmd)) { - s_logger.warn("Set " + _ip + " boot dev to " + bootDev + "failed"); + logger.warn("Set " + _ip + " boot dev to " + bootDev + "failed"); return new Answer(cmd, false, "Set " + _ip + " boot dev to " + bootDev + "failed"); } - 
s_logger.warn("Set " + _ip + " boot dev to " + bootDev + "Success"); + logger.warn("Set " + _ip + " boot dev to " + bootDev + "Success"); return new Answer(cmd, true, "Set " + _ip + " boot dev to " + bootDev + "Success"); } @@ -494,7 +492,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource return Answer.createUnsupportedCommandAnswer(cmd); } } catch (Throwable t) { - s_logger.debug(t.getMessage(), t); + logger.debug(t.getMessage(), t); return new Answer(cmd, false, t.getMessage()); } } @@ -545,7 +543,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser(); if (!doScript(_getStatusCommand, interpreter)) { success = true; - s_logger.warn("Cannot get power status of " + getName() + ", assume VM state changed successfully"); + logger.warn("Cannot get power status of " + getName() + ", assume VM state changed successfully"); break; } @@ -600,7 +598,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException e) { - s_logger.warn(e.getMessage(), e); + logger.warn(e.getMessage(), e); } q = QueryBuilder.create(VMInstanceVO.class); @@ -614,21 +612,21 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource return new StartAnswer(cmd); } - s_logger.debug(String.format("still wait for baremetal provision done notification for vm[name:%s], current vm state is %s", vmvo.getInstanceName(), vmvo.getState())); + logger.debug(String.format("still wait for baremetal provision done notification for vm[name:%s], current vm state is %s", vmvo.getInstanceName(), vmvo.getState())); } return new StartAnswer(cmd, String.format("timeout after %s seconds, no baremetal provision done notification received. 
vm[name:%s] failed to start", isProvisionDoneNotificationTimeout, vm.getName())); } } - s_logger.debug("Start bare metal vm " + vm.getName() + "successfully"); + logger.debug("Start bare metal vm " + vm.getName() + "successfully"); _vmName = vm.getName(); return new StartAnswer(cmd); } protected ReadyAnswer execute(ReadyCommand cmd) { // derived resource should check if the PXE server is ready - s_logger.debug("Bare metal resource " + getName() + " is ready"); + logger.debug("Bare metal resource " + getName() + " is ready"); return new ReadyAnswer(cmd); } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java index 292e6ef051d..b6500b0f1a7 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.Pod; @@ -60,7 +59,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = {NetworkGuru.class}) public class BaremetaNetworkGuru extends DirectPodBasedNetworkGuru { - private static final Logger s_logger = Logger.getLogger(BaremetaNetworkGuru.class); @Inject private HostDao _hostDao; @Inject @@ -149,14 +147,14 @@ public class BaremetaNetworkGuru extends DirectPodBasedNetworkGuru { * nic.setBroadcastUri(null); nic.setIsolationUri(null); */ - s_logger.debug("Allocated a nic " + nic + " for " + vm); + logger.debug("Allocated a nic " + nic + " for " + vm); } private void getBaremetalIp(NicProfile nic, Pod pod, VirtualMachineProfile vm, Network network, String requiredIp) throws InsufficientAddressCapacityException, 
ConcurrentOperationException { DataCenter dc = _dcDao.findById(pod.getDataCenterId()); if (nic.getIPv4Address() == null) { - s_logger.debug(String.format("Requiring ip address: %s", nic.getIPv4Address())); + logger.debug(String.format("Requiring ip address: %s", nic.getIPv4Address())); PublicIp ip = _ipAddrMgr.assignPublicIpAddress(dc.getId(), pod.getId(), vm.getOwner(), VlanType.DirectAttached, network.getId(), requiredIp, false); nic.setIPv4Address(ip.getAddress().toString()); nic.setFormat(AddressFormat.Ip4); diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java index 5f2c7466b0f..1a11644bda6 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java @@ -25,7 +25,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.baremetal.database.BaremetalDhcpVO; import com.cloud.dc.DataCenter.NetworkType; @@ -59,7 +58,6 @@ import com.cloud.vm.dao.NicDao; @Local(value = NetworkElement.class) public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProvider { - private static final Logger s_logger = Logger.getLogger(BaremetalDhcpElement.class); private static final Map> capabilities; @Inject @@ -101,7 +99,7 @@ public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProv public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { if (offering.isSystemOnly() || !canHandle(dest, offering.getTrafficType(), network.getGuestType())) { - s_logger.debug("BaremetalDhcpElement can not handle networkoffering: " + 
offering.getName()); + logger.debug("BaremetalDhcpElement can not handle networkoffering: " + offering.getName()); return false; } return true; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java index c2182e19d81..fe969884888 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java @@ -34,7 +34,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.api.AddBaremetalDhcpCmd; import org.apache.cloudstack.api.ListBaremetalDhcpCmd; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -77,7 +76,6 @@ import com.cloud.vm.dao.UserVmDao; @Local(value = {BaremetalDhcpManager.class}) public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDhcpManager, ResourceStateAdapter { - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(BaremetalDhcpManagerImpl.class); protected String _name; @Inject DataCenterDao _dcDao; @@ -157,15 +155,15 @@ public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDh try { Answer ans = _agentMgr.send(h.getId(), dhcpCommand); if (ans.getResult()) { - s_logger.debug(String.format("Set dhcp entry on external DHCP %1$s successfully(ip=%2$s, mac=%3$s, vmname=%4$s)", h.getPrivateIpAddress(), + logger.debug(String.format("Set dhcp entry on external DHCP %1$s successfully(ip=%2$s, mac=%3$s, vmname=%4$s)", h.getPrivateIpAddress(), nic.getIPv4Address(), nic.getMacAddress(), profile.getVirtualMachine().getHostName())); return true; } else { - s_logger.debug(errMsg + " " + ans.getDetails()); + logger.debug(errMsg + " " + ans.getDetails()); throw new ResourceUnavailableException(errMsg, DataCenter.class, 
zoneId); } } catch (Exception e) { - s_logger.debug(errMsg, e); + logger.debug(errMsg, e); throw new ResourceUnavailableException(errMsg + e.getMessage(), DataCenter.class, zoneId); } } @@ -228,7 +226,7 @@ public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDh try { uri = new URI(cmd.getUrl()); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new IllegalArgumentException(e.getMessage()); } @@ -262,7 +260,7 @@ public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDh throw new CloudRuntimeException("Unsupport DHCP server type: " + cmd.getDhcpType()); } } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new CloudRuntimeException(e.getMessage()); } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpResourceBase.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpResourceBase.java index 0d3cdce74e8..9fe3f6ad516 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpResourceBase.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpResourceBase.java @@ -27,7 +27,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -44,7 +43,6 @@ import com.cloud.resource.ServerResource; import com.cloud.utils.component.ManagerBase; public class BaremetalDhcpResourceBase extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BaremetalDhcpResourceBase.class); String _name; String _guid; String _username; @@ -129,7 +127,7 @@ public class BaremetalDhcpResourceBase extends ManagerBase implements ServerReso } protected ReadyAnswer execute(ReadyCommand cmd) { - s_logger.debug("External DHCP resource " + _name + " is ready"); + logger.debug("External DHCP resource " + _name + 
" is ready"); return new ReadyAnswer(cmd); } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpdResource.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpdResource.java index 8fd2c35ced9..e92cbf2c204 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpdResource.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpdResource.java @@ -27,7 +27,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -41,14 +40,13 @@ import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SSHCmdHelper; public class BaremetalDhcpdResource extends BaremetalDhcpResourceBase { - private static final Logger s_logger = Logger.getLogger(BaremetalDhcpdResource.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { com.trilead.ssh2.Connection sshConnection = null; try { super.configure(name, params); - s_logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, "******")); + logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, "******")); sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); if (sshConnection == null) { throw new ConfigurationException(String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); @@ -89,10 +87,10 @@ public class BaremetalDhcpdResource extends BaremetalDhcpResourceBase { throw new ConfigurationException("prepare Dhcpd at " + _ip + " failed, command:" + cmd); } - s_logger.debug("Dhcpd resource configure successfully"); + logger.debug("Dhcpd resource configure successfully"); return true; } catch (Exception e) { - s_logger.debug("Dhcpd resource configure 
failed", e); + logger.debug("Dhcpd resource configure failed", e); throw new ConfigurationException(e.getMessage()); } finally { SSHCmdHelper.releaseSshConnection(sshConnection); diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java index 79f23cc46db..51acfe93d39 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java @@ -27,7 +27,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -41,14 +40,13 @@ import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SSHCmdHelper; public class BaremetalDnsmasqResource extends BaremetalDhcpResourceBase { - private static final Logger s_logger = Logger.getLogger(BaremetalDnsmasqResource.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { com.trilead.ssh2.Connection sshConnection = null; try { super.configure(name, params); - s_logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, _password)); + logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, _password)); sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); if (sshConnection == null) { throw new ConfigurationException(String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); @@ -81,10 +79,10 @@ public class BaremetalDnsmasqResource extends BaremetalDhcpResourceBase { } */ - s_logger.debug("Dnsmasq resource configure successfully"); + logger.debug("Dnsmasq resource configure successfully"); return true; } 
catch (Exception e) { - s_logger.debug("Dnsmasq resorce configure failed", e); + logger.debug("Dnsmasq resorce configure failed", e); throw new ConfigurationException(e.getMessage()); } finally { SSHCmdHelper.releaseSshConnection(sshConnection); diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java index 260f4f16e87..0da8a6ee44d 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java @@ -25,7 +25,6 @@ import java.util.Map; import javax.naming.ConfigurationException; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -40,7 +39,6 @@ import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SSHCmdHelper; public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { - private static final Logger s_logger = Logger.getLogger(BaremetalKickStartPxeResource.class); private static final String Name = "BaremetalKickStartPxeResource"; String _tftpDir; @@ -54,11 +52,11 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_ip, 22); - s_logger.debug(String.format("Trying to connect to kickstart PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); + logger.debug(String.format("Trying to connect to kickstart PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new 
ConfigurationException(String.format("Cannot connect to kickstart PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); } @@ -132,7 +130,7 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -143,7 +141,7 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); + logger.debug("Prepare for creating baremetal template failed", e); return new Answer(cmd, false, e.getMessage()); } finally { if (sshConnection != null) { @@ -168,7 +166,7 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -188,10 +186,10 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { return new Answer(cmd, false, "prepare kickstart at pxe server " + _ip + " failed, command:" + script); } - s_logger.debug("Prepare kickstart PXE server successfully"); + logger.debug("Prepare kickstart PXE server successfully"); return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Prepare for kickstart server failed", e); + logger.debug("Prepare for kickstart server failed", e); return new Answer(cmd, 
false, e.getMessage()); } finally { if (sshConnection != null) { diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java index beffa3e7fcc..6e179d606c7 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.AddBaremetalKickStartPxeCmd; import org.apache.cloudstack.api.AddBaremetalPxeCmd; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; @@ -82,7 +81,6 @@ import com.cloud.vm.dao.NicDao; @Local(value = BaremetalPxeService.class) public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase implements BaremetalPxeService { - private static final Logger s_logger = Logger.getLogger(BaremetalKickStartServiceImpl.class); @Inject ResourceManager _resourceMgr; @Inject @@ -172,7 +170,7 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple throw new CloudRuntimeException(String.format("cannot find id_rsa.cloud")); } if (!keyFile.exists()) { - s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); + logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } return keyFile; } @@ -201,7 +199,7 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple cmd.setTemplateUuid(template.getUuid()); Answer aws = _agentMgr.send(pxeVo.getHostId(), cmd); if (!aws.getResult()) { - s_logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + 
aws.getDetails()); + logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + aws.getDetails()); return false; } @@ -231,7 +229,7 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple List tuple = parseKickstartUrl(profile); String cmd = String.format("/opt/cloud/bin/prepare_pxe.sh %s %s %s %s %s %s", tuple.get(1), tuple.get(2), profile.getTemplate().getUuid(), String.format("01-%s", nic.getMacAddress().replaceAll(":", "-")).toLowerCase(), tuple.get(0), nic.getMacAddress().toLowerCase()); - s_logger.debug(String.format("prepare pxe on virtual router[ip:%s], cmd: %s", mgmtNic.getIPv4Address(), cmd)); + logger.debug(String.format("prepare pxe on virtual router[ip:%s], cmd: %s", mgmtNic.getIPv4Address(), cmd)); Pair ret = SshHelper.sshExecute(mgmtNic.getIPv4Address(), 3922, "root", getSystemVMKeyFile(), null, cmd); if (!ret.first()) { throw new CloudRuntimeException(String.format("failed preparing PXE in virtual router[id:%s], because %s", vr.getId(), ret.second())); @@ -239,7 +237,7 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple //String internalServerIp = "10.223.110.231"; cmd = String.format("/opt/cloud/bin/baremetal_snat.sh %s %s %s", mgmtNic.getIPv4Address(), internalServerIp, mgmtNic.getIPv4Gateway()); - s_logger.debug(String.format("prepare SNAT on virtual router[ip:%s], cmd: %s", mgmtNic.getIPv4Address(), cmd)); + logger.debug(String.format("prepare SNAT on virtual router[ip:%s], cmd: %s", mgmtNic.getIPv4Address(), cmd)); ret = SshHelper.sshExecute(mgmtNic.getIPv4Address(), 3922, "root", getSystemVMKeyFile(), null, cmd); if (!ret.first()) { throw new CloudRuntimeException(String.format("failed preparing PXE in virtual router[id:%s], because %s", vr.getId(), ret.second())); @@ -264,12 +262,12 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple IpmISetBootDevCommand bootCmd = new IpmISetBootDevCommand(BootDev.pxe); Answer aws = 
_agentMgr.send(dest.getHost().getId(), bootCmd); if (!aws.getResult()) { - s_logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + aws.getDetails()); + logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + aws.getDetails()); } return aws.getResult(); } catch (Exception e) { - s_logger.warn("Cannot prepare PXE server", e); + logger.warn("Cannot prepare PXE server", e); return false; } } @@ -321,7 +319,7 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple try { uri = new URI(cmd.getUrl()); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new IllegalArgumentException(e.getMessage()); } String ipAddress = uri.getHost(); diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java index 80827a25ac8..0ca2f921e21 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java @@ -28,7 +28,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -45,7 +44,6 @@ import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SSHCmdHelper; public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { - private static final Logger s_logger = Logger.getLogger(BaremetalPingPxeResource.class); private static final String Name = "BaremetalPingPxeResource"; String _storageServer; String _pingDir; @@ -97,11 +95,11 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_ip, 22); - s_logger.debug(String.format("Trying to connect to PING PXE server(IP=%1$s, username=%2$s, 
password=%3$s", _ip, _username, "******")); + logger.debug(String.format("Trying to connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); } @@ -151,7 +149,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -161,11 +159,11 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) { return new PreparePxeServerAnswer(cmd, "prepare PING at " + _ip + " failed, command:" + script); } - s_logger.debug("Prepare Ping PXE server successfully"); + logger.debug("Prepare Ping PXE server successfully"); return new PreparePxeServerAnswer(cmd); } catch (Exception e) { - s_logger.debug("Prepare PING pxe server failed", e); + logger.debug("Prepare PING pxe server failed", e); return new PreparePxeServerAnswer(cmd, e.getMessage()); } finally { if (sshConnection != null) { @@ -179,7 +177,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot 
connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -189,11 +187,11 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) { return new Answer(cmd, false, "prepare for creating template failed, command:" + script); } - s_logger.debug("Prepare for creating template successfully"); + logger.debug("Prepare for creating template successfully"); return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); + logger.debug("Prepare for creating baremetal template failed", e); return new Answer(cmd, false, e.getMessage()); } finally { if (sshConnection != null) { @@ -237,7 +235,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -248,7 +246,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); + logger.debug("Prepare for creating baremetal template failed", e); return new Answer(cmd, false, e.getMessage()); } finally { if (sshConnection != null) { diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java index da2866896da..7612bab3ad1 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java +++ 
b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java @@ -51,7 +51,6 @@ import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.log4j.Logger; import javax.ejb.Local; import javax.inject.Inject; @@ -61,7 +60,6 @@ import java.util.Set; @Local(value = NetworkElement.class) public class BaremetalPxeElement extends AdapterBase implements NetworkElement { - private static final Logger s_logger = Logger.getLogger(BaremetalPxeElement.class); private static final Map> capabilities; @Inject @@ -112,7 +110,7 @@ public class BaremetalPxeElement extends AdapterBase implements NetworkElement { } if (offering.isSystemOnly() || !canHandle(dest, offering.getTrafficType(), network.getGuestType())) { - s_logger.debug("BaremetalPxeElement can not handle network offering: " + offering.getName()); + logger.debug("BaremetalPxeElement can not handle network offering: " + offering.getName()); return false; } return true; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java index e99292a6436..2e9b6910651 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.api.AddBaremetalPxeCmd; import org.apache.cloudstack.api.AddBaremetalPxePingServerCmd; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -73,7 +72,6 @@ import com.cloud.vm.dao.UserVmDao; @Local(value = {BaremetalPxeManager.class}) public 
class BaremetalPxeManagerImpl extends ManagerBase implements BaremetalPxeManager, ResourceStateAdapter { - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(BaremetalPxeManagerImpl.class); @Inject DataCenterDao _dcDao; @Inject @@ -235,13 +233,13 @@ public class BaremetalPxeManagerImpl extends ManagerBase implements BaremetalPxe try { Answer ans = _agentMgr.send(pxeVo.getHostId(), cmd); if (!ans.getResult()) { - s_logger.debug(String.format("Add userdata to vm:%s failed because %s", vm.getInstanceName(), ans.getDetails())); + logger.debug(String.format("Add userdata to vm:%s failed because %s", vm.getInstanceName(), ans.getDetails())); return false; } else { return true; } } catch (Exception e) { - s_logger.debug(String.format("Add userdata to vm:%s failed", vm.getInstanceName()), e); + logger.debug(String.format("Add userdata to vm:%s failed", vm.getInstanceName()), e); return false; } } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeResourceBase.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeResourceBase.java index 5b5a959d597..01d1bf62717 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeResourceBase.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeResourceBase.java @@ -26,7 +26,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -41,7 +40,6 @@ import com.cloud.resource.ServerResource; import com.cloud.utils.component.ManagerBase; public class BaremetalPxeResourceBase extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BaremetalPxeResourceBase.class); String _name; String _guid; String _username; @@ -84,7 +82,7 @@ public class BaremetalPxeResourceBase extends ManagerBase implements 
ServerResou } protected ReadyAnswer execute(ReadyCommand cmd) { - s_logger.debug("Pxe resource " + _name + " is ready"); + logger.debug("Pxe resource " + _name + " is ready"); return new ReadyAnswer(cmd); } diff --git a/plugins/hypervisors/hyperv/src/com/cloud/ha/HypervInvestigator.java b/plugins/hypervisors/hyperv/src/com/cloud/ha/HypervInvestigator.java index 634f242bfe7..75ae7097c28 100644 --- a/plugins/hypervisors/hyperv/src/com/cloud/ha/HypervInvestigator.java +++ b/plugins/hypervisors/hyperv/src/com/cloud/ha/HypervInvestigator.java @@ -23,7 +23,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -38,7 +37,6 @@ import com.cloud.utils.component.AdapterBase; @Local(value=Investigator.class) public class HypervInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(HypervInvestigator.class); @Inject HostDao _hostDao; @Inject AgentManager _agentMgr; @Inject ResourceManager _resourceMgr; @@ -70,7 +68,7 @@ public class HypervInvestigator extends AdapterBase implements Investigator { return answer.getResult() ? 
Status.Down : Status.Up; } } catch (Exception e) { - s_logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug("Failed to send command to host: " + neighbor.getId()); } } diff --git a/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java b/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java index c9281240913..c34bfae8090 100644 --- a/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java +++ b/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java @@ -30,7 +30,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -74,7 +73,6 @@ import com.cloud.storage.StorageLayer; */ @Local(value = Discoverer.class) public class HypervServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(HypervServerDiscoverer.class); Random _rand = new Random(System.currentTimeMillis()); Map _storageMounts = new HashMap(); @@ -119,7 +117,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer // assert if (startup.getHypervisorType() != HypervisorType.Hyperv) { - s_logger.debug("Not Hyper-V hypervisor, so moving on."); + logger.debug("Not Hyper-V hypervisor, so moving on."); return; } @@ -135,8 +133,8 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer _clusterDao.update(cluster.getId(), cluster); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Setting up host " + agentId); + if (logger.isDebugEnabled()) { + logger.debug("Setting up host " + agentId); } HostEnvironment env = new HostEnvironment(); @@ -161,14 +159,14 @@ public class HypervServerDiscoverer extends DiscovererBase 
implements Discoverer if (reason == null) { reason = " details were null"; } - s_logger.warn("Unable to setup agent " + agentId + " due to " + reason); + logger.warn("Unable to setup agent " + agentId + " due to " + reason); } // Error handling borrowed from XcpServerDiscoverer, may need to be // updated. } catch (AgentUnavailableException e) { - s_logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); + logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); } catch (OperationTimedoutException e) { - s_logger.warn("Unable to setup agent " + agentId + " because it timed out", e); + logger.warn("Unable to setup agent " + agentId + " because it timed out", e); } throw new ConnectionException(true, "Reinitialize agent after setup."); } @@ -203,14 +201,14 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer public final Map> find(final long dcId, final Long podId, final Long clusterId, final URI uri, final String username, final String password, final List hostTags) throws DiscoveryException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Discover host. dc(zone): " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + uri.getHost()); + if (logger.isInfoEnabled()) { + logger.info("Discover host. 
dc(zone): " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + uri.getHost()); } // Assertions if (podId == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("No pod is assigned, skipping the discovery in" + " Hyperv discoverer"); + if (logger.isInfoEnabled()) { + logger.info("No pod is assigned, skipping the discovery in" + " Hyperv discoverer"); } return null; } @@ -218,20 +216,20 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer // in the // database if (cluster == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("No cluster in database for cluster id " + clusterId); + if (logger.isInfoEnabled()) { + logger.info("No cluster in database for cluster id " + clusterId); } return null; } if (cluster.getHypervisorType() != HypervisorType.Hyperv) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster " + clusterId + "is not for Hyperv hypervisors"); + if (logger.isInfoEnabled()) { + logger.info("Cluster " + clusterId + "is not for Hyperv hypervisors"); } return null; } if (!uri.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of" + " the discovery for this: " + uri; - s_logger.debug(msg); + logger.debug(msg); return null; } @@ -243,11 +241,11 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer String guidWithTail = calcServerResourceGuid(uuidSeed) + "-HypervResource"; if (_resourceMgr.findHostByGuid(guidWithTail) != null) { - s_logger.debug("Skipping " + agentIp + " because " + guidWithTail + " is already in the database."); + logger.debug("Skipping " + agentIp + " because " + guidWithTail + " is already in the database."); return null; } - s_logger.info("Creating" + HypervDirectConnectResource.class.getName() + " HypervDummyResourceBase for zone/pod/cluster " + dcId + "/" + podId + "/" + + logger.info("Creating" + HypervDirectConnectResource.class.getName() + " HypervDummyResourceBase for zone/pod/cluster " + dcId + 
"/" + podId + "/" + clusterId); // Some Hypervisors organise themselves in pools. @@ -288,7 +286,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer Answer pingAns = resource.executeRequest(ping); if (pingAns == null || !pingAns.getResult()) { String errMsg = "Agent not running, or no route to agent on at " + uri; - s_logger.debug(errMsg); + logger.debug(errMsg); throw new DiscoveryException(errMsg); } @@ -299,14 +297,14 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer return resources; } catch (ConfigurationException e) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + uri.getHost(), "Error is " + e.getMessage()); - s_logger.warn("Unable to instantiate " + uri.getHost(), e); + logger.warn("Unable to instantiate " + uri.getHost(), e); } catch (UnknownHostException e) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + uri.getHost(), "Error is " + e.getMessage()); - s_logger.warn("Unable to instantiate " + uri.getHost(), e); + logger.warn("Unable to instantiate " + uri.getHost(), e); } catch (Exception e) { String msg = " can't setup agent, due to " + e.toString() + " - " + e.getMessage(); - s_logger.warn(msg); + logger.warn(msg); } return null; } @@ -383,7 +381,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer return null; } - s_logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.Hyperv + ". Checking CIDR..."); + logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.Hyperv + ". 
Checking CIDR..."); HostPodVO pod = _podDao.findById(host.getPodId()); DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); diff --git a/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java b/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java index 000d37ce157..afad4500375 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java +++ b/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java @@ -28,7 +28,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ResourceManager; import com.cloud.utils.component.AdapterBase; -import org.apache.log4j.Logger; import javax.ejb.Local; import javax.inject.Inject; @@ -36,7 +35,6 @@ import java.util.List; @Local(value = Investigator.class) public class KVMInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(KVMInvestigator.class); @Inject HostDao _hostDao; @Inject @@ -72,7 +70,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { hostStatus = answer.getResult() ? Status.Down : Status.Up; } } catch (Exception e) { - s_logger.debug("Failed to send command to host: " + agent.getId()); + logger.debug("Failed to send command to host: " + agent.getId()); } if (hostStatus == null) { hostStatus = Status.Disconnected; @@ -83,18 +81,18 @@ public class KVMInvestigator extends AdapterBase implements Investigator { if (neighbor.getId() == agent.getId() || (neighbor.getHypervisorType() != Hypervisor.HypervisorType.KVM && neighbor.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; } - s_logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId()); + logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId()); try { Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); if (answer != null) { neighbourStatus = answer.getResult() ? 
Status.Down : Status.Up; - s_logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId()); + logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId()); if (neighbourStatus == Status.Up) { break; } } } catch (Exception e) { - s_logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug("Failed to send command to host: " + neighbor.getId()); } } if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java index b72004038c2..6281bd59a43 100644 --- a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java +++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java @@ -28,7 +28,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.StartupCommand; @@ -55,7 +54,6 @@ import com.cloud.utils.ssh.SSHCmdHelper; @Local(value = Discoverer.class) public class OvmDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(OvmDiscoverer.class); protected String _publicNetworkDevice; protected String _privateNetworkDevice; protected String _guestNetworkDevice; @@ -99,25 +97,25 @@ public class OvmDiscoverer extends DiscovererBase implements Discoverer, Resourc if (!url.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of the discovery for this: " + url; - s_logger.debug(msg); + logger.debug(msg); return null; } if (clusterId == null) { String msg = "must specify cluster Id when add host"; - 
s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg); } if (podId == null) { String msg = "must specify pod Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg); } ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || (cluster.getHypervisorType() != HypervisorType.Ovm)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Ovm hypervisors"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for Ovm hypervisors"); return null; } @@ -141,7 +139,7 @@ public class OvmDiscoverer extends DiscovererBase implements Discoverer, Resourc throw new CloudRuntimeException("The host " + hostIp + " has been added before"); } - s_logger.debug("Ovm discover is going to disover host having guid " + guid); + logger.debug("Ovm discover is going to discover host having guid " + guid); ClusterVO clu = _clusterDao.findById(clusterId); if (clu.getGuid() == null) { @@ -198,16 +196,16 @@ public class OvmDiscoverer extends DiscovererBase implements Discoverer, Resourc resources.put(ovmResource, details); return resources; } catch (XmlRpcException e) { - s_logger.debug("XmlRpc exception, Unable to discover OVM: " + url, e); + logger.debug("XmlRpc exception, Unable to discover OVM: " + url, e); return null; } catch (UnknownHostException e) { - s_logger.debug("Host name resolve failed exception, Unable to discover OVM: " + url, e); + logger.debug("Host name resolve failed exception, Unable to discover OVM: " + url, e); return null; } catch (ConfigurationException e) { - s_logger.debug("Configure resource failed, Unable to discover OVM: " + url, e); + logger.debug("Configure resource failed, Unable to discover OVM: " + url, e); return null; } catch (Exception e) { - s_logger.debug("Unable to discover OVM: " + url, e); + logger.debug("Unable to discover OVM: " + url, e); return null; } } diff --git 
a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmFencer.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmFencer.java index a4276a4c84f..6426acc3c70 100644 --- a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmFencer.java +++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmFencer.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.FenceAnswer; @@ -41,7 +40,6 @@ import com.cloud.vm.VirtualMachine; @Local(value = FenceBuilder.class) public class OvmFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(OvmFencer.class); @Inject AgentManager _agentMgr; @Inject @@ -71,7 +69,7 @@ public class OvmFencer extends AdapterBase implements FenceBuilder { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.Ovm) { - s_logger.debug("Don't know how to fence non Ovm hosts " + host.getHypervisorType()); + logger.debug("Don't know how to fence non Ovm hosts " + host.getHypervisorType()); return null; } @@ -95,13 +93,13 @@ public class OvmFencer extends AdapterBase implements FenceBuilder { try { answer = (FenceAnswer)_agentMgr.send(h.getId(), fence); } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); } continue; } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); } continue; } @@ -111,8 +109,8 @@ 
public class OvmFencer extends AdapterBase implements FenceBuilder { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); } return false; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/ha/Ovm3Investigator.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/ha/Ovm3Investigator.java index f9e88c0b57c..36362903ed2 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/ha/Ovm3Investigator.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/ha/Ovm3Investigator.java @@ -22,7 +22,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -37,7 +36,6 @@ import com.cloud.utils.component.AdapterBase; @Local(value = Investigator.class) public class Ovm3Investigator extends AdapterBase implements Investigator { - private static final Logger LOGGER = Logger.getLogger(Ovm3Investigator.class); @Inject HostDao hostDao; @Inject @@ -47,7 +45,7 @@ public class Ovm3Investigator extends AdapterBase implements Investigator { @Override public boolean isVmAlive(com.cloud.vm.VirtualMachine vm, Host host) throws UnknownVM { - LOGGER.debug("isVmAlive: " + vm.getHostName() + " on " + host.getName()); + logger.debug("isVmAlive: " + vm.getHostName() + " on " + host.getName()); if (host.getHypervisorType() != Hypervisor.HypervisorType.Ovm3) { throw new UnknownVM(); } @@ -60,7 +58,7 @@ public class Ovm3Investigator extends AdapterBase implements Investigator { @Override public Status isAgentAlive(Host agent) { - LOGGER.debug("isAgentAlive: " + agent.getName()); + logger.debug("isAgentAlive: " + agent.getName()); if (agent.getHypervisorType() != Hypervisor.HypervisorType.Ovm3) { return null; } @@ -76,7 +74,7 @@ public class Ovm3Investigator extends 
AdapterBase implements Investigator { return answer.getResult() ? Status.Down : Status.Up; } } catch (Exception e) { - LOGGER.error("Failed to send command to host: " + neighbor.getId(), e); + logger.error("Failed to send command to host: " + neighbor.getId(), e); } } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java index 5dab20abbd7..83f8ccdf3ea 100755 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java @@ -30,7 +30,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -68,7 +67,6 @@ import com.cloud.utils.ssh.SSHCmdHelper; @Local(value = Discoverer.class) public class Ovm3Discoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger LOGGER = Logger.getLogger(Ovm3Discoverer.class); protected String publicNetworkDevice; protected String privateNetworkDevice; protected String guestNetworkDevice; @@ -125,11 +123,11 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, private boolean CheckUrl(URI url) throws DiscoveryException { if ("http".equals(url.getScheme()) || "https".equals(url.getScheme())) { String msg = "Discovering " + url + ": " + _params; - LOGGER.debug(msg); + logger.debug(msg); } else { String msg = "urlString is not http(s) so we're not taking care of the discovery for this: " + url; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } return true; @@ -144,13 +142,13 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, CheckUrl(url); if (clusterId == null) { String msg = 
"must specify cluster Id when add host"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } if (podId == null) { String msg = "must specify pod Id when add host"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } @@ -158,30 +156,30 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, if (cluster == null || (cluster.getHypervisorType() != HypervisorType.Ovm3)) { String msg = "invalid cluster id or cluster is not for Ovm3 hypervisors"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } else { - LOGGER.debug("cluster: " + cluster); + logger.debug("cluster: " + cluster); } String agentUsername = _params.get("agentusername"); if (agentUsername == null) { String msg = "Agent user name must be specified"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } String agentPassword = _params.get("agentpassword"); if (agentPassword == null) { String msg = "Agent password must be specified"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } String agentPort = _params.get("agentport"); if (agentPort == null) { String msg = "Agent port must be specified"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } @@ -195,11 +193,11 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, if (checkIfExisted(guid)) { String msg = "The host " + hostIp + " has been added before"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } - LOGGER.debug("Ovm3 discover is going to disover host having guid " + logger.debug("Ovm3 discover is going to disover host having guid " + guid); ClusterVO clu = clusterDao.findById(clusterId); @@ -226,7 +224,7 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, String msg = "Cannot Ssh to Ovm3 host(IP=" + hostIp + ", username=" + username + ", password=*******), discovery failed"; - LOGGER.warn(msg); + 
logger.warn(msg); throw new DiscoveryException(msg); } @@ -283,17 +281,17 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, resources.put(ovmResource, details); return resources; } catch (UnknownHostException e) { - LOGGER.error( + logger.error( "Host name resolve failed exception, Unable to discover Ovm3 host: " + url.getHost(), e); return null; } catch (ConfigurationException e) { - LOGGER.error( + logger.error( "Configure resource failed, Unable to discover Ovm3 host: " + url.getHost(), e); return null; } catch (IOException | Ovm3ResourceException e) { - LOGGER.error("Unable to discover Ovm3 host: " + url.getHost(), e); + logger.error("Unable to discover Ovm3 host: " + url.getHost(), e); return null; } } @@ -301,7 +299,7 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, @Override public void postDiscovery(List hosts, long msId) throws CloudRuntimeException { - LOGGER.debug("postDiscovery: " + hosts); + logger.debug("postDiscovery: " + hosts); } @Override @@ -317,26 +315,26 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, @Override public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { - LOGGER.debug("createHostVOForConnectedAgent: " + host); + logger.debug("createHostVOForConnectedAgent: " + host); return null; } @Override public boolean processAnswers(long agentId, long seq, Answer[] answers) { - LOGGER.debug("processAnswers: " + agentId); + logger.debug("processAnswers: " + agentId); return false; } @Override public boolean processCommands(long agentId, long seq, Command[] commands) { - LOGGER.debug("processCommands: " + agentId); + logger.debug("processCommands: " + agentId); return false; } @Override public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { - LOGGER.debug("processControlCommand: " + agentId); + logger.debug("processControlCommand: " + agentId); return null; } @@ -344,12 +342,12 @@ public class 
Ovm3Discoverer extends DiscovererBase implements Discoverer, @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) { - LOGGER.debug("processConnect"); + logger.debug("processConnect"); } @Override public boolean processDisconnect(long agentId, Status state) { - LOGGER.debug("processDisconnect"); + logger.debug("processDisconnect"); return false; } @@ -360,13 +358,13 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, @Override public int getTimeout() { - LOGGER.debug("getTimeout"); + logger.debug("getTimeout"); return 0; } @Override public boolean processTimeout(long agentId, long seq) { - LOGGER.debug("processTimeout: " + agentId); + logger.debug("processTimeout: " + agentId); return false; } @@ -374,7 +372,7 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map details, List hostTags) { - LOGGER.debug("createHostVOForDirectConnectAgent: " + host); + logger.debug("createHostVOForDirectConnectAgent: " + host); StartupCommand firstCmd = startup[0]; if (!(firstCmd instanceof StartupRoutingCommand)) { return null; @@ -392,7 +390,7 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, @Override public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException { - LOGGER.debug("deleteHost: " + host); + logger.debug("deleteHost: " + host); if (host.getType() != com.cloud.host.Host.Type.Routing || host.getHypervisorType() != HypervisorType.Ovm3) { return null; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3FenceBuilder.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3FenceBuilder.java index c997a589623..099d3adea12 100755 --- 
a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3FenceBuilder.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3FenceBuilder.java @@ -24,7 +24,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.FenceAnswer; @@ -43,7 +42,6 @@ import com.cloud.vm.VirtualMachine; @Local(value = FenceBuilder.class) public class Ovm3FenceBuilder extends AdapterBase implements FenceBuilder { Map fenceParams; - private static final Logger LOGGER = Logger.getLogger(Ovm3FenceBuilder.class); @Inject AgentManager agentMgr; @Inject @@ -76,11 +74,11 @@ public class Ovm3FenceBuilder extends AdapterBase implements FenceBuilder { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.Ovm3) { - LOGGER.debug("Don't know how to fence non Ovm3 hosts " + logger.debug("Don't know how to fence non Ovm3 hosts " + host.getHypervisorType()); return null; } else { - LOGGER.debug("Fencing " + vm + " on host " + host + logger.debug("Fencing " + vm + " on host " + host + " with params: "+ fenceParams ); } @@ -96,8 +94,8 @@ public class Ovm3FenceBuilder extends AdapterBase implements FenceBuilder { try { answer = (FenceAnswer) agentMgr.send(h.getId(), fence); } catch (AgentUnavailableException | OperationTimedoutException e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Moving on to the next host because " + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); } continue; @@ -108,8 +106,8 @@ public class Ovm3FenceBuilder extends AdapterBase implements FenceBuilder { } } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Unable to fence off " + vm.toString() + " on " + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off " + vm.toString() + " on " + 
host.toString()); } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorGuru.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorGuru.java index 6ec77414a53..dd1f8b0e74c 100755 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorGuru.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorGuru.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; -import org.apache.log4j.Logger; import com.cloud.agent.api.Command; import com.cloud.agent.api.to.DataObjectType; @@ -45,7 +44,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = HypervisorGuru.class) public class Ovm3HypervisorGuru extends HypervisorGuruBase implements HypervisorGuru { - private final Logger LOGGER = Logger.getLogger(Ovm3HypervisorGuru.class); @Inject GuestOSDao guestOsDao; @Inject @@ -86,7 +84,7 @@ public class Ovm3HypervisorGuru extends HypervisorGuruBase implements Hypervisor * @see com.cloud.hypervisor.HypervisorGuruBase#getCommandHostDelegation(long, com.cloud.agent.api.Command) */ public Pair getCommandHostDelegation(long hostId, Command cmd) { - LOGGER.debug("getCommandHostDelegation: " + cmd.getClass()); + logger.debug("getCommandHostDelegation: " + cmd.getClass()); if (cmd instanceof StorageSubSystemCommand) { StorageSubSystemCommand c = (StorageSubSystemCommand)cmd; c.setExecuteInSequence(true); @@ -97,7 +95,7 @@ public class Ovm3HypervisorGuru extends HypervisorGuruBase implements Hypervisor DataTO destData = cpyCommand.getDestTO(); if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.TEMPLATE) { - LOGGER.debug("Snapshot 
to Template: " + cmd); + logger.debug("Snapshot to Template: " + cmd); DataStoreTO srcStore = srcData.getDataStore(); DataStoreTO destStore = destData.getDataStore(); if (srcStore instanceof NfsTO && destStore instanceof NfsTO) { diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java index 0eeef82c055..2337694a7e6 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java @@ -34,7 +34,6 @@ import javax.naming.ConfigurationException; import com.cloud.user.AccountManager; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -74,7 +73,6 @@ import com.cloud.utils.net.NetUtils; @Component @Local(value = {MockAgentManager.class}) public class MockAgentManagerImpl extends ManagerBase implements MockAgentManager { - private static final Logger s_logger = Logger.getLogger(MockAgentManagerImpl.class); @Inject HostPodDao _podDao = null; @Inject @@ -110,10 +108,10 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage Long cidrSize = (Long)cidrPair.get(1); return new Pair(cidrAddress, cidrSize); } catch (PatternSyntaxException e) { - s_logger.error("Exception while splitting pod cidr"); + logger.error("Exception while splitting pod cidr"); return null; } catch (IndexOutOfBoundsException e) { - s_logger.error("Invalid pod cidr. Please check"); + logger.error("Invalid pod cidr. 
Please check"); return null; } } @@ -178,7 +176,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("Error while configuring mock agent " + ex.getMessage()); + logger.error("Error while configuring mock agent " + ex.getMessage()); throw new CloudRuntimeException("Error configuring agent", ex); } finally { txn.close(); @@ -197,7 +195,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage newResources.put(agentResource, args); } catch (ConfigurationException e) { - s_logger.error("error while configuring server resource" + e.getMessage()); + logger.error("error while configuring server resource" + e.getMessage()); } } } @@ -210,7 +208,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage random = SecureRandom.getInstance("SHA1PRNG"); _executor = new ThreadPoolExecutor(1, 5, 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory("Simulator-Agent-Mgr")); } catch (NoSuchAlgorithmException e) { - s_logger.debug("Failed to initialize random:" + e.toString()); + logger.debug("Failed to initialize random:" + e.toString()); return false; } return true; @@ -298,7 +296,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage try { _resourceMgr.deleteHost(host.getId(), true, true); } catch (Exception e) { - s_logger.debug("Failed to delete host: ", e); + logger.debug("Failed to delete host: ", e); } } } @@ -363,12 +361,12 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage try { _resourceMgr.discoverHosts(cmd); } catch (DiscoveryException e) { - s_logger.debug("Failed to discover host: " + e.toString()); + logger.debug("Failed to discover host: " + e.toString()); CallContext.unregister(); return; } } catch (ConfigurationException e) { - s_logger.debug("Failed to load secondary storage resource: " + e.toString()); + logger.debug("Failed to 
load secondary storage resource: " + e.toString()); CallContext.unregister(); return; } @@ -386,7 +384,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage if (_host != null) { return _host; } else { - s_logger.error("Host with guid " + guid + " was not found"); + logger.error("Host with guid " + guid + " was not found"); return null; } } catch (Exception ex) { @@ -494,8 +492,8 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage @Override public Answer checkNetworkCommand(CheckNetworkCommand cmd) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if network name setup is done on the resource"); + if (logger.isDebugEnabled()) { + logger.debug("Checking if network name setup is done on the resource"); } return new CheckNetworkAnswer(cmd, true, "Network Setup check by names is done"); } diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManagerImpl.java index 0251c0c9073..53a875f0041 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManagerImpl.java @@ -21,7 +21,6 @@ package com.cloud.agent.manager; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckS2SVpnConnectionsCommand; @@ -60,7 +59,6 @@ import com.cloud.simulator.dao.MockVMDao; import com.cloud.utils.component.ManagerBase; public class MockNetworkManagerImpl extends ManagerBase implements MockNetworkManager { - private static final Logger s_logger = Logger.getLogger(MockVmManagerImpl.class); @Inject MockVMDao _mockVmDao; @@ -123,10 +121,10 @@ public class MockNetworkManagerImpl extends ManagerBase implements MockNetworkMa public PlugNicAnswer plugNic(PlugNicCommand cmd) { String vmname = cmd.getVmName(); if 
(_mockVmDao.findByVmName(vmname) != null) { - s_logger.debug("Plugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.debug("Plugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new PlugNicAnswer(cmd, true, "success"); } - s_logger.error("Plug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.error("Plug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new PlugNicAnswer(cmd, false, "failure"); } @@ -134,10 +132,10 @@ public class MockNetworkManagerImpl extends ManagerBase implements MockNetworkMa public UnPlugNicAnswer unplugNic(UnPlugNicCommand cmd) { String vmname = cmd.getVmName(); if (_mockVmDao.findByVmName(vmname) != null) { - s_logger.debug("Plugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.debug("Plugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new UnPlugNicAnswer(cmd, true, "success"); } - s_logger.error("Plug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.error("Plug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new UnPlugNicAnswer(cmd, false, "failure"); } @@ -211,7 +209,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements MockNetworkMa return new Answer(cmd, true, "success"); } catch (Exception e) { String msg = "Creating guest network failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(cmd, false, msg); } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java 
b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java index a4fc2f9c10b..120cab3ef1a 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java @@ -31,7 +31,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DownloadCommand; @@ -104,7 +103,6 @@ import com.cloud.vm.DiskProfile; @Component @Local(value = {MockStorageManager.class}) public class MockStorageManagerImpl extends ManagerBase implements MockStorageManager { - private static final Logger s_logger = Logger.getLogger(MockStorageManagerImpl.class); @Inject MockStoragePoolDao _mockStoragePoolDao = null; @Inject @@ -1059,7 +1057,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa MessageDigest md = MessageDigest.getInstance("md5"); md5 = String.format("%032x", new BigInteger(1, md.digest(cmd.getTemplatePath().getBytes()))); } catch (NoSuchAlgorithmException e) { - s_logger.debug("failed to gernerate md5:" + e.toString()); + logger.debug("failed to gernerate md5:" + e.toString()); } txn.commit(); return new Answer(cmd, true, md5); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java index dbe8adea25a..bf8a1c17b54 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import 
org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -93,7 +92,6 @@ import com.cloud.vm.VirtualMachine.PowerState; @Component @Local(value = {MockVmManager.class}) public class MockVmManagerImpl extends ManagerBase implements MockVmManager { - private static final Logger s_logger = Logger.getLogger(MockVmManagerImpl.class); @Inject MockVMDao _mockVmDao = null; @@ -262,12 +260,12 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { final MockVm vm = _mockVmDao.findByVmName(router_name); final String args = vm.getBootargs(); if (args.indexOf("router_pr=100") > 0) { - s_logger.debug("Router priority is for MASTER"); + logger.debug("Router priority is for MASTER"); final CheckRouterAnswer ans = new CheckRouterAnswer(cmd, "Status: MASTER", true); ans.setState(VirtualRouter.RedundantState.MASTER); return ans; } else { - s_logger.debug("Router priority is for BACKUP"); + logger.debug("Router priority is for BACKUP"); final CheckRouterAnswer ans = new CheckRouterAnswer(cmd, "Status: BACKUP", true); ans.setState(VirtualRouter.RedundantState.BACKUP); return ans; @@ -462,7 +460,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { final String vmName = cmd.getVmName(); final String vmSnapshotName = cmd.getTarget().getSnapshotName(); - s_logger.debug("Created snapshot " + vmSnapshotName + " for vm " + vmName); + logger.debug("Created snapshot " + vmSnapshotName + " for vm " + vmName); return new CreateVMSnapshotAnswer(cmd, cmd.getTarget(), cmd.getVolumeTOs()); } @@ -473,7 +471,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { if (_mockVmDao.findByVmName(cmd.getVmName()) == null) { return new DeleteVMSnapshotAnswer(cmd, false, "No VM by name " + cmd.getVmName()); } - s_logger.debug("Removed snapshot " + snapshotName + " of VM " + vm); + 
logger.debug("Removed snapshot " + snapshotName + " of VM " + vm); return new DeleteVMSnapshotAnswer(cmd, cmd.getVolumeTOs()); } @@ -485,7 +483,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { if (vmVo == null) { return new RevertToVMSnapshotAnswer(cmd, false, "No VM by name " + cmd.getVmName()); } - s_logger.debug("Reverted to snapshot " + snapshot + " of VM " + vm); + logger.debug("Reverted to snapshot " + snapshot + " of VM " + vm); return new RevertToVMSnapshotAnswer(cmd, cmd.getVolumeTOs(), vmVo.getPowerState()); } @@ -571,40 +569,40 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { boolean updateSeqnoAndSig = false; if (currSeqnum != null) { if (cmd.getSeqNum() > currSeqnum) { - s_logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum); + logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum); updateSeqnoAndSig = true; if (!cmd.getSignature().equals(currSig)) { - s_logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " new signature received:" + cmd.getSignature() + " curr=" + + logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " new signature received:" + cmd.getSignature() + " curr=" + currSig + ", updated iptables"); action = ", updated iptables"; reason = reason + "seqno_increased_sig_changed"; } else { - s_logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " no change in signature:" + cmd.getSignature() + ", do nothing"); + logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " no change in signature:" + cmd.getSignature() + ", do nothing"); reason = reason + "seqno_increased_sig_same"; } } else if (cmd.getSeqNum() < currSeqnum) { - s_logger.info("Older seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + ", do nothing"); + logger.info("Older seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + ", do nothing"); reason = 
reason + "seqno_decreased"; } else { if (!cmd.getSignature().equals(currSig)) { - s_logger.info("Identical seqno received: " + cmd.getSeqNum() + " new signature received:" + cmd.getSignature() + " curr=" + currSig + + logger.info("Identical seqno received: " + cmd.getSeqNum() + " new signature received:" + cmd.getSignature() + " curr=" + currSig + ", updated iptables"); action = ", updated iptables"; reason = reason + "seqno_same_sig_changed"; updateSeqnoAndSig = true; } else { - s_logger.info("Identical seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " no change in signature:" + cmd.getSignature() + + logger.info("Identical seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " no change in signature:" + cmd.getSignature() + ", do nothing"); reason = reason + "seqno_same_sig_same"; } } } else { - s_logger.info("New seqno received: " + cmd.getSeqNum() + " old=null"); + logger.info("New seqno received: " + cmd.getSeqNum() + " old=null"); updateSeqnoAndSig = true; action = ", updated iptables"; reason = ", seqno_new"; } - s_logger.info("Programmed network rules for vm " + cmd.getVmName() + " seqno=" + cmd.getSeqNum() + " signature=" + cmd.getSignature() + " guestIp=" + + logger.info("Programmed network rules for vm " + cmd.getVmName() + " seqno=" + cmd.getSeqNum() + " signature=" + cmd.getSignature() + " guestIp=" + cmd.getGuestIp() + ", numIngressRules=" + cmd.getIngressRuleSet().length + ", numEgressRules=" + cmd.getEgressRuleSet().length + " total cidrs=" + cmd.getTotalNumCidrs() + action + reason); return updateSeqnoAndSig; diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java index f9ed9ed65d4..02bf9b30025 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java @@ -32,7 +32,6 @@ 
import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadProgressCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.command.UploadStatusCommand; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -134,7 +133,6 @@ import com.google.gson.stream.JsonReader; @Component @Local(value = {SimulatorManager.class}) public class SimulatorManagerImpl extends ManagerBase implements SimulatorManager, PluggableService { - private static final Logger s_logger = Logger.getLogger(SimulatorManagerImpl.class); private static final Gson s_gson = GsonHelper.getGson(); @Inject MockVmManager _mockVmMgr; @@ -223,7 +221,7 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage try { info.setTimeout(Integer.valueOf(entry.getValue())); } catch (final NumberFormatException e) { - s_logger.debug("invalid timeout parameter: " + e.toString()); + logger.debug("invalid timeout parameter: " + e.toString()); } } @@ -232,9 +230,9 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage final int wait = Integer.valueOf(entry.getValue()); Thread.sleep(wait); } catch (final NumberFormatException e) { - s_logger.debug("invalid wait parameter: " + e.toString()); + logger.debug("invalid wait parameter: " + e.toString()); } catch (final InterruptedException e) { - s_logger.debug("thread is interrupted: " + e.toString()); + logger.debug("thread is interrupted: " + e.toString()); } } @@ -422,7 +420,7 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage cmd instanceof SecStorageFirewallCfgCommand) { answer = new Answer(cmd); } else { - s_logger.error("Simulator does not implement command of type " + cmd.toString()); + logger.error("Simulator does not implement command of type " + cmd.toString()); answer = 
Answer.createUnsupportedCommandAnswer(cmd); } } @@ -436,7 +434,7 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage return answer; } catch (final Exception e) { - s_logger.error("Failed execute cmd: ", e); + logger.error("Failed execute cmd: ", e); txn.rollback(); return new Answer(cmd, false, e.toString()); } finally { diff --git a/plugins/hypervisors/simulator/src/com/cloud/ha/SimulatorFencer.java b/plugins/hypervisors/simulator/src/com/cloud/ha/SimulatorFencer.java index aa7c3d49cb4..5910a50e3e9 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/ha/SimulatorFencer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/ha/SimulatorFencer.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.FenceAnswer; @@ -41,7 +40,6 @@ import com.cloud.vm.VirtualMachine; @Local(value=FenceBuilder.class) public class SimulatorFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(SimulatorFencer.class); @Inject HostDao _hostDao; @Inject AgentManager _agentMgr; @@ -72,7 +70,7 @@ public class SimulatorFencer extends AdapterBase implements FenceBuilder { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.Simulator) { - s_logger.debug("Don't know how to fence non simulator hosts " + host.getHypervisorType()); + logger.debug("Don't know how to fence non simulator hosts " + host.getHypervisorType()); return null; } @@ -91,13 +89,13 @@ public class SimulatorFencer extends AdapterBase implements FenceBuilder { try { answer = (FenceAnswer)_agentMgr.send(h.getId(), fence); } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); + if (logger.isDebugEnabled()) { + 
logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); } continue; } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); } continue; } @@ -107,8 +105,8 @@ public class SimulatorFencer extends AdapterBase implements FenceBuilder { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); } return false; diff --git a/plugins/hypervisors/simulator/src/com/cloud/ha/SimulatorInvestigator.java b/plugins/hypervisors/simulator/src/com/cloud/ha/SimulatorInvestigator.java index bc55ba32497..c57337f4d5f 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/ha/SimulatorInvestigator.java +++ b/plugins/hypervisors/simulator/src/com/cloud/ha/SimulatorInvestigator.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -43,7 +42,6 @@ import com.cloud.vm.VirtualMachine.PowerState; @Local(value=Investigator.class) public class SimulatorInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(SimulatorInvestigator.class); @Inject AgentManager _agentMgr; @Inject @@ -72,7 +70,7 @@ public class SimulatorInvestigator extends AdapterBase implements Investigator { return answer.getResult() ? 
Status.Up : Status.Down; } } catch (Exception e) { - s_logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug("Failed to send command to host: " + neighbor.getId()); } } @@ -85,17 +83,17 @@ public class SimulatorInvestigator extends AdapterBase implements Investigator { try { Answer answer = _agentMgr.send(vm.getHostId(), cmd); if (!answer.getResult()) { - s_logger.debug("Unable to get vm state on " + vm.toString()); + logger.debug("Unable to get vm state on " + vm.toString()); throw new UnknownVM(); } CheckVirtualMachineAnswer cvmAnswer = (CheckVirtualMachineAnswer)answer; - s_logger.debug("Agent responded with state " + cvmAnswer.getState().toString()); + logger.debug("Agent responded with state " + cvmAnswer.getState().toString()); return cvmAnswer.getState() == PowerState.PowerOn; } catch (AgentUnavailableException e) { - s_logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage()); + logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage()); throw new UnknownVM(); } catch (OperationTimedoutException e) { - s_logger.debug("Operation timed out for " + vm.toString() + ": " + e.getMessage()); + logger.debug("Operation timed out for " + vm.toString() + ": " + e.getMessage()); throw new UnknownVM(); } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java index 7c31a9af754..84f678d4e37 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java @@ -28,7 +28,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -55,7 +54,6 @@ import com.cloud.storage.dao.VMTemplateZoneDao; @Local(value = 
Discoverer.class) public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(SimulatorDiscoverer.class); @Inject HostDao _hostDao; @@ -94,8 +92,8 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L if (scheme.equals("http")) { if (host == null || !host.startsWith("sim")) { String msg = "uri is not of simulator type so we're not taking care of the discovery for this: " + uri; - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } return null; } @@ -121,8 +119,8 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L } } else { String msg = "uriString is not http so we're not taking care of the discovery for this: " + uri; - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } return null; } @@ -130,15 +128,15 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L String cluster = null; if (clusterId == null) { String msg = "must specify cluster Id when adding host"; - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } throw new RuntimeException(msg); } else { ClusterVO clu = _clusterDao.findById(clusterId); if (clu == null || (clu.getHypervisorType() != HypervisorType.Simulator)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Simulator hypervisors"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for Simulator hypervisors"); return null; } cluster = Long.toString(clusterId); @@ -151,8 +149,8 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L String pod; if (podId == null) { String msg = "must specify pod Id when adding host"; - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); 
+ if (logger.isDebugEnabled()) { + logger.debug(msg); } throw new RuntimeException(msg); } else { @@ -176,17 +174,17 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L resources = createAgentResources(params); return resources; } catch (Exception ex) { - s_logger.error("Exception when discovering simulator hosts: " + ex.getMessage()); + logger.error("Exception when discovering simulator hosts: " + ex.getMessage()); } return null; } private Map> createAgentResources(Map params) { try { - s_logger.info("Creating Simulator Resources"); + logger.info("Creating Simulator Resources"); return _mockAgentMgr.createServerResources(params); } catch (Exception ex) { - s_logger.warn("Caught exception at agent resource creation: " + ex.getMessage(), ex); + logger.warn("Caught exception at agent resource creation: " + ex.getMessage(), ex); } return null; } diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java index 11bf3466105..50cd1470e50 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.resource.SecondaryStorageDiscoverer; import org.apache.cloudstack.storage.resource.SecondaryStorageResource; -import org.apache.log4j.Logger; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -47,7 +46,6 @@ import com.cloud.storage.dao.SnapshotDao; @Local(value = Discoverer.class) public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer implements ResourceStateAdapter, Listener { - private static final Logger s_logger = 
Logger.getLogger(SimulatorSecondaryDiscoverer.class); @Inject MockStorageManager _mockStorageMgr = null; @Inject @@ -71,7 +69,7 @@ public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer imp public Map> find(long dcId, Long podId, Long clusterId, URI uri, String username, String password, List hostTags) { if (!uri.getScheme().equalsIgnoreCase("sim")) { - s_logger.debug("It's not NFS or file or ISO, so not a secondary storage server: " + uri.toString()); + logger.debug("It's not NFS or file or ISO, so not a secondary storage server: " + uri.toString()); return null; } List stores = imageStoreDao.listImageStores(); diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java index 1196260a9ae..3231ee9069c 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java @@ -22,7 +22,6 @@ import java.util.Formatter; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.simulator.MockConfigurationVO; @@ -34,7 +33,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {MockConfigurationDao.class}) public class MockConfigurationDaoImpl extends GenericDaoBase implements MockConfigurationDao { - final static Logger s_logger = Logger.getLogger(MockConfigurationDaoImpl.class); private final SearchBuilder _searchByDcIdName; private final SearchBuilder _searchByDcIDPodIdName; private final SearchBuilder _searchByDcIDPodIdClusterIdName; @@ -141,7 +139,7 @@ public class MockConfigurationDaoImpl extends GenericDaoBase networkElements; public VmwareServerDiscoverer() { - s_logger.info("VmwareServerDiscoverer is constructed"); + logger.info("VmwareServerDiscoverer is constructed"); } @Override public Map> find(long 
dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags) throws DiscoveryException { - if (s_logger.isInfoEnabled()) - s_logger.info("Discover host. dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost()); + if (logger.isInfoEnabled()) + logger.info("Discover host. dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost()); if (podId == null) { - if (s_logger.isInfoEnabled()) - s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); + if (logger.isInfoEnabled()) + logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); return null; } boolean failureInClusterDiscovery = true; String vsmIp = ""; ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || cluster.getHypervisorType() != HypervisorType.VMware) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for VMware hypervisors"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for VMware hypervisors"); return null; } @@ -146,7 +144,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer // If either or both not provided, try to retrieve & use the credentials from database, which are provided earlier while adding VMware DC to zone. if (usernameNotProvided || passwordNotProvided) { // Retrieve credentials associated with VMware DC - s_logger.info("Username and/or Password not provided while adding cluster to cloudstack zone. " + logger.info("Username and/or Password not provided while adding cluster to cloudstack zone. 
" + "Hence using both username & password provided while adding VMware DC to CloudStack zone."); username = vmwareDc.getUser(); password = vmwareDc.getPassword(); @@ -182,7 +180,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(hosts.get(0).getHypervisorType(), hosts.get(0).getHypervisorVersion()); if (hosts.size() >= maxHostsPerCluster) { String msg = "VMware cluster " + cluster.getName() + " is too big to add new host, current size: " + hosts.size() + ", max. size: " + maxHostsPerCluster; - s_logger.error(msg); + logger.error(msg); throw new DiscoveredWithErrorException(msg); } } @@ -268,7 +266,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer "Both public traffic and guest traffic is over same physical network " + pNetworkPublic + ". And virtual switch type chosen for each traffic is different" + ". A physical network cannot be shared by different types of virtual switches."; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } } @@ -276,7 +274,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.VMware); if (privateTrafficLabel != null) { - s_logger.info("Detected private network label : " + privateTrafficLabel); + logger.info("Detected private network label : " + privateTrafficLabel); } Pair vsmInfo = new Pair(false, 0L); if (nexusDVS && (guestTrafficLabelObj.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) || @@ -287,13 +285,13 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer if (zoneType != NetworkType.Basic) { publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, HypervisorType.VMware); if (publicTrafficLabel != null) { - s_logger.info("Detected public network label : " + publicTrafficLabel); + 
logger.info("Detected public network label : " + publicTrafficLabel); } } // Get physical network label guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(dcId, HypervisorType.VMware); if (guestTrafficLabel != null) { - s_logger.info("Detected guest network label : " + guestTrafficLabel); + logger.info("Detected guest network label : " + guestTrafficLabel); } // Before proceeding with validation of Nexus 1000v VSM check if an instance of Nexus 1000v VSM is already associated with this cluster. boolean clusterHasVsm = _vmwareMgr.hasNexusVSM(clusterId); @@ -320,18 +318,18 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer if (nexusDVS) { if (vsmCredentials != null) { - s_logger.info("Stocking credentials of Nexus VSM"); + logger.info("Stocking credentials of Nexus VSM"); context.registerStockObject("vsmcredentials", vsmCredentials); } } List morHosts = _vmwareMgr.addHostToPodCluster(context, dcId, podId, clusterId, URLDecoder.decode(url.getPath(), "UTF-8")); if (morHosts == null) - s_logger.info("Found 0 hosts."); + logger.info("Found 0 hosts."); if (privateTrafficLabel != null) context.uregisterStockObject("privateTrafficLabel"); if (morHosts == null) { - s_logger.error("Unable to find host or cluster based on url: " + URLDecoder.decode(url.getPath(), "UTF-8")); + logger.error("Unable to find host or cluster based on url: " + URLDecoder.decode(url.getPath(), "UTF-8")); return null; } @@ -342,7 +340,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer morCluster = context.getHostMorByPath(URLDecoder.decode(uriFromCluster.getPath(), "UTF-8")); if (morCluster == null || !morCluster.getType().equalsIgnoreCase("ClusterComputeResource")) { - s_logger.warn("Cluster url does not point to a valid vSphere cluster, url: " + clusterDetails.get("url")); + logger.warn("Cluster url does not point to a valid vSphere cluster, url: " + clusterDetails.get("url")); return null; } else { ClusterMO clusterMo = new 
ClusterMO(context, morCluster); @@ -356,9 +354,9 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer if (!validateDiscoveredHosts(context, morCluster, morHosts)) { if (morCluster == null) - s_logger.warn("The discovered host is not standalone host, can not be added to a standalone cluster"); + logger.warn("The discovered host is not standalone host, can not be added to a standalone cluster"); else - s_logger.warn("The discovered host does not belong to the cluster"); + logger.warn("The discovered host does not belong to the cluster"); return null; } @@ -394,7 +392,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer resource.configure("VMware", params); } catch (ConfigurationException e) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + url.getHost(), "Error is " + e.getMessage()); - s_logger.warn("Unable to instantiate " + url.getHost(), e); + logger.warn("Unable to instantiate " + url.getHost(), e); } resource.start(); @@ -414,17 +412,17 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer } catch (DiscoveredWithErrorException e) { throw e; } catch (Exception e) { - s_logger.warn("Unable to connect to Vmware vSphere server. service address: " + url.getHost() + ". " + e); + logger.warn("Unable to connect to Vmware vSphere server. service address: " + url.getHost() + ". 
" + e); return null; } finally { if (context != null) context.close(); if (failureInClusterDiscovery && vsmInfo.first()) { try { - s_logger.debug("Deleting Nexus 1000v VSM " + vsmIp + " because cluster discovery and addition to zone has failed."); + logger.debug("Deleting Nexus 1000v VSM " + vsmIp + " because cluster discovery and addition to zone has failed."); _nexusElement.deleteCiscoNexusVSM(vsmInfo.second().longValue()); } catch (Exception e) { - s_logger.warn("Deleting Nexus 1000v VSM " + vsmIp + " failed."); + logger.warn("Deleting Nexus 1000v VSM " + vsmIp + " failed."); } } } @@ -449,7 +447,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer vmwareDcZone = _vmwareDcZoneMapDao.findByZoneId(dcId); if (vmwareDcZone == null) { msg = "Zone " + dcId + " is not associated with any VMware DC yet. " + "Please add VMware DC to this zone first and then try to add clusters."; - s_logger.error(msg); + logger.error(msg); throw new DiscoveryException(msg); } @@ -494,13 +492,13 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer msg = "This cluster " + clusterName + " belongs to vCenter " + url.getHost() + ". But this zone is associated with VMware DC from vCenter " + vCenterHost + ". Make sure the cluster being added belongs to vCenter " + vCenterHost + " and VMware DC " + vmwareDcNameFromDb; - s_logger.error(msg); + logger.error(msg); throw new DiscoveryException(msg); } else if (!vmwareDcNameFromDb.equalsIgnoreCase(vmwareDcNameFromApi)) { msg = "This cluster " + clusterName + " belongs to VMware DC " + vmwareDcNameFromApi + " .But this zone is associated with VMware DC " + vmwareDcNameFromDb + ". 
Make sure the cluster being added belongs to VMware DC " + vmwareDcNameFromDb + " in vCenter " + vCenterHost; - s_logger.error(msg); + logger.error(msg); throw new DiscoveryException(msg); } return updatedInventoryPath; @@ -547,15 +545,15 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) - s_logger.info("Configure VmwareServerDiscoverer, discover name: " + name); + if (logger.isInfoEnabled()) + logger.info("Configure VmwareServerDiscoverer, discover name: " + name); super.configure(name, params); createVmwareToolsIso(); - if (s_logger.isInfoEnabled()) { - s_logger.info("VmwareServerDiscoverer has been successfully configured"); + if (logger.isInfoEnabled()) { + logger.info("VmwareServerDiscoverer has been successfully configured"); } _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; @@ -634,7 +632,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer try { trafficLabelObj = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defaultVirtualSwitchType); } catch (InvalidParameterValueException e) { - s_logger.error("Failed to recognize virtual switch type specified for " + trafficType + " traffic due to " + e.getMessage()); + logger.error("Failed to recognize virtual switch type specified for " + trafficType + " traffic due to " + e.getMessage()); throw e; } @@ -670,7 +668,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer try { trafficLabelObj = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defVirtualSwitchType); } catch (InvalidParameterValueException e) { - s_logger.error("Failed to recognize virtual switch type specified for " + trafficType + " traffic due to " + e.getMessage()); + logger.error("Failed to recognize virtual switch type specified for " + trafficType + " traffic due 
to " + e.getMessage()); throw e; } @@ -745,11 +743,11 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer try { resource.configure(host.getName(), params); } catch (ConfigurationException e) { - s_logger.warn("Unable to configure resource due to " + e.getMessage()); + logger.warn("Unable to configure resource due to " + e.getMessage()); return null; } if (!resource.start()) { - s_logger.warn("Unable to start the resource"); + logger.warn("Unable to start the resource"); return null; } } @@ -759,7 +757,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer private void validateVswitchType(String inputVswitchType) { VirtualSwitchType vSwitchType = VirtualSwitchType.getType(inputVswitchType); if (vSwitchType == VirtualSwitchType.None) { - s_logger.error("Unable to resolve " + inputVswitchType + " to a valid virtual switch type in VMware environment."); + logger.error("Unable to resolve " + inputVswitchType + " to a valid virtual switch type in VMware environment."); throw new InvalidParameterValueException("Invalid virtual switch type : " + inputVswitchType); } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java index 46e8105f0a9..61f38b68489 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.hypervisor.vmware.LegacyZoneVO; @@ -35,7 +34,6 @@ import com.cloud.utils.db.SearchCriteria.Op; @Local(value = LegacyZoneDao.class) @DB public class LegacyZoneDaoImpl extends GenericDaoBase implements LegacyZoneDao { - protected static final Logger s_logger = 
Logger.getLogger(LegacyZoneDaoImpl.class); final SearchBuilder zoneSearch; final SearchBuilder fullTableSearch; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java index bd12f92833a..1fdbcf6f531 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.hypervisor.vmware.VmwareDatacenterVO; @@ -35,7 +34,6 @@ import com.cloud.utils.db.SearchCriteria.Op; @Local(value = VmwareDatacenterDao.class) @DB public class VmwareDatacenterDaoImpl extends GenericDaoBase implements VmwareDatacenterDao { - protected static final Logger s_logger = Logger.getLogger(VmwareDatacenterDaoImpl.class); final SearchBuilder nameSearch; final SearchBuilder guidSearch; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index eb0ec55505c..38fb7fabfbb 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -36,7 +36,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.vmware.vim25.AboutInfo; import com.vmware.vim25.ManagedObjectReference; @@ -125,7 +124,6 @@ import com.cloud.vm.DomainRouterVO; @Local(value = {VmwareManager.class, VmwareDatacenterService.class}) public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener, 
VmwareDatacenterService { - private static final Logger s_logger = Logger.getLogger(VmwareManagerImpl.class); private static final int STARTUP_DELAY = 60000; // 60 seconds private static final long DEFAULT_HOST_SCAN_INTERVAL = 600000; // every 10 minutes @@ -203,10 +201,10 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw @Override public boolean configure(String name, Map params) throws ConfigurationException { - s_logger.info("Configure VmwareManagerImpl, manager name: " + name); + logger.info("Configure VmwareManagerImpl, manager name: " + name); if (!_configDao.isPremium()) { - s_logger.error("Vmware component can only run under premium distribution"); + logger.error("Vmware component can only run under premium distribution"); throw new ConfigurationException("Vmware component can only run under premium distribution"); } @@ -214,7 +212,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw if (_instance == null) { _instance = "DEFAULT"; } - s_logger.info("VmwareManagerImpl config - instance.name: " + _instance); + logger.info("VmwareManagerImpl config - instance.name: " + _instance); _mountParent = _configDao.getValue(Config.MountParent.key()); if (_mountParent == null) { @@ -224,7 +222,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw if (_instance != null) { _mountParent = _mountParent + File.separator + _instance; } - s_logger.info("VmwareManagerImpl config - _mountParent: " + _mountParent); + logger.info("VmwareManagerImpl config - _mountParent: " + _mountParent); String value = (String)params.get("scripts.timeout"); _timeout = NumbersUtil.parseInt(value, 1440) * 1000; @@ -268,20 +266,20 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw _additionalPortRangeStart = NumbersUtil.parseInt(_configDao.getValue(Config.VmwareAdditionalVncPortRangeStart.key()), 59000); if (_additionalPortRangeStart > 65535) { - s_logger.warn("Invalid 
port range start port (" + _additionalPortRangeStart + ") for additional VNC port allocation, reset it to default start port 59000"); + logger.warn("Invalid port range start port (" + _additionalPortRangeStart + ") for additional VNC port allocation, reset it to default start port 59000"); _additionalPortRangeStart = 59000; } _additionalPortRangeSize = NumbersUtil.parseInt(_configDao.getValue(Config.VmwareAdditionalVncPortRangeSize.key()), 1000); if (_additionalPortRangeSize < 0 || _additionalPortRangeStart + _additionalPortRangeSize > 65535) { - s_logger.warn("Invalid port range size (" + _additionalPortRangeSize + " for range starts at " + _additionalPortRangeStart); + logger.warn("Invalid port range size (" + _additionalPortRangeSize + " for range starts at " + _additionalPortRangeStart); _additionalPortRangeSize = Math.min(1000, 65535 - _additionalPortRangeStart); } _routerExtraPublicNics = NumbersUtil.parseInt(_configDao.getValue(Config.RouterExtraPublicNics.key()), 2); _vCenterSessionTimeout = NumbersUtil.parseInt(_configDao.getValue(Config.VmwareVcenterSessionTimeout.key()), 1200) * 1000; - s_logger.info("VmwareManagerImpl config - vmware.vcenter.session.timeout: " + _vCenterSessionTimeout); + logger.info("VmwareManagerImpl config - vmware.vcenter.session.timeout: " + _vCenterSessionTimeout); _recycleHungWorker = _configDao.getValue(Config.VmwareRecycleHungWorker.key()); if (_recycleHungWorker == null || _recycleHungWorker.isEmpty()) { @@ -293,17 +291,17 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw _rootDiskController = DiskControllerType.ide.toString(); } - s_logger.info("Additional VNC port allocation range is settled at " + _additionalPortRangeStart + " to " + (_additionalPortRangeStart + _additionalPortRangeSize)); + logger.info("Additional VNC port allocation range is settled at " + _additionalPortRangeStart + " to " + (_additionalPortRangeStart + _additionalPortRangeSize)); value = 
_configDao.getValue("vmware.host.scan.interval"); _hostScanInterval = NumbersUtil.parseLong(value, DEFAULT_HOST_SCAN_INTERVAL); - s_logger.info("VmwareManagerImpl config - vmware.host.scan.interval: " + _hostScanInterval); + logger.info("VmwareManagerImpl config - vmware.host.scan.interval: " + _hostScanInterval); ((VmwareStorageManagerImpl)_storageMgr).configure(params); _agentMgr.registerForHostEvents(this, true, true, true); - s_logger.info("VmwareManagerImpl has been successfully configured"); + logger.info("VmwareManagerImpl has been successfully configured"); return true; } @@ -321,7 +319,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw try { _hostScanScheduler.awaitTermination(3000, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { - s_logger.debug("[ignored] interupted while stopping<:/."); + logger.debug("[ignored] interupted while stopping<:/."); } shutdownCleanup(); @@ -362,7 +360,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw vlanId = mgmtTrafficLabelObj.getVlanId(); vSwitchType = mgmtTrafficLabelObj.getVirtualSwitchType().toString(); - s_logger.info("Preparing network on host " + hostMo.getContext().toString() + " for " + privateTrafficLabel); + logger.info("Preparing network on host " + hostMo.getContext().toString() + " for " + privateTrafficLabel); VirtualSwitchType vsType = VirtualSwitchType.getType(vSwitchType); //The management network is probably always going to be a physical network with islation type of vlans, so assume BroadcastDomainType VLAN if (VirtualSwitchType.StandardVirtualSwitch == vsType) { @@ -410,7 +408,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(HypervisorType.VMware, version); if (hosts.size() > maxHostsPerCluster) { String msg = "Failed to add VMware cluster as size is too big, current size: " + hosts.size() + ", max. 
size: " + maxHostsPerCluster; - s_logger.error(msg); + logger.error(msg); throw new DiscoveredWithErrorException(msg); } } @@ -429,12 +427,12 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw returnedHostList.add(mor); return returnedHostList; } else { - s_logger.error("Unsupport host type " + mor.getType() + ":" + mor.getValue() + " from inventory path: " + hostInventoryPath); + logger.error("Unsupport host type " + mor.getType() + ":" + mor.getValue() + " from inventory path: " + hostInventoryPath); return null; } } - s_logger.error("Unable to find host from inventory path: " + hostInventoryPath); + logger.error("Unable to find host from inventory path: " + hostInventoryPath); return null; } @@ -449,12 +447,12 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw if (secUrl == null) { // we are using non-NFS image store, then use cache storage instead - s_logger.info("Secondary storage is not NFS, we need to use staging storage"); + logger.info("Secondary storage is not NFS, we need to use staging storage"); DataStore cacheStore = _dataStoreMgr.getImageCacheStore(dcId); if (cacheStore != null) { secUrl = cacheStore.getUri(); } else { - s_logger.warn("No staging storage is found when non-NFS secondary storage is used"); + logger.warn("No staging storage is found when non-NFS secondary storage is used"); } } @@ -502,17 +500,17 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw @Override public boolean needRecycle(String workerTag) { - if (s_logger.isInfoEnabled()) - s_logger.info("Check to see if a worker VM with tag " + workerTag + " needs to be recycled"); + if (logger.isInfoEnabled()) + logger.info("Check to see if a worker VM with tag " + workerTag + " needs to be recycled"); if (workerTag == null || workerTag.isEmpty()) { - s_logger.error("Invalid worker VM tag " + workerTag); + logger.error("Invalid worker VM tag " + workerTag); return false; } String tokens[] = 
workerTag.split("-"); if (tokens.length != 3) { - s_logger.error("Invalid worker VM tag " + workerTag); + logger.error("Invalid worker VM tag " + workerTag); return false; } @@ -521,14 +519,14 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw long runid = Long.parseLong(tokens[2]); if (_mshostPeerDao.countStateSeenInPeers(msid, runid, ManagementServerHost.State.Down) > 0) { - if (s_logger.isInfoEnabled()) - s_logger.info("Worker VM's owner management server node has been detected down from peer nodes, recycle it"); + if (logger.isInfoEnabled()) + logger.info("Worker VM's owner management server node has been detected down from peer nodes, recycle it"); return true; } if (runid != _clusterMgr.getManagementRunId(msid)) { - if (s_logger.isInfoEnabled()) - s_logger.info("Worker VM's owner management server has changed runid, recycle it"); + if (logger.isInfoEnabled()) + logger.info("Worker VM's owner management server has changed runid, recycle it"); return true; } @@ -536,8 +534,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw // there are pending tasks on the subject VM /* if(System.currentTimeMillis() - startTick > _hungWorkerTimeout) { - if(s_logger.isInfoEnabled()) - s_logger.info("Worker VM expired, seconds elapsed: " + (System.currentTimeMillis() - startTick) / 1000); + if(logger.isInfoEnabled()) + logger.info("Worker VM expired, seconds elapsed: " + (System.currentTimeMillis() - startTick) / 1000); return true; } */ @@ -556,7 +554,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw if (!patchFolder.exists()) { if (!patchFolder.mkdirs()) { String msg = "Unable to create systemvm folder on secondary storage. 
location: " + patchFolder.toString(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } @@ -564,23 +562,23 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw File srcIso = getSystemVMPatchIsoFile(); File destIso = new File(mountPoint + "/systemvm/" + getSystemVMIsoFileNameOnDatastore()); if (!destIso.exists()) { - s_logger.info("Inject SSH key pairs before copying systemvm.iso into secondary storage"); + logger.info("Inject SSH key pairs before copying systemvm.iso into secondary storage"); _configServer.updateKeyPairs(); - s_logger.info("Copy System VM patch ISO file to secondary storage. source ISO: " + srcIso.getAbsolutePath() + ", destination: " + + logger.info("Copy System VM patch ISO file to secondary storage. source ISO: " + srcIso.getAbsolutePath() + ", destination: " + destIso.getAbsolutePath()); try { FileUtil.copyfile(srcIso, destIso); } catch (IOException e) { - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); String msg = "Unable to copy systemvm ISO on secondary storage. 
src location: " + srcIso.toString() + ", dest location: " + destIso; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("SystemVM ISO file " + destIso.getPath() + " already exists"); + if (logger.isTraceEnabled()) { + logger.trace("SystemVM ISO file " + destIso.getPath() + " already exists"); } } } finally { @@ -618,7 +616,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw assert (isoFile != null); if (!isoFile.exists()) { - s_logger.error("Unable to locate systemvm.iso in your setup at " + isoFile.toString()); + logger.error("Unable to locate systemvm.iso in your setup at " + isoFile.toString()); } return isoFile; } @@ -635,7 +633,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw } assert (keyFile != null); if (!keyFile.exists()) { - s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); + logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } return keyFile; } @@ -666,12 +664,12 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw try { uri = new URI(storageUrl); } catch (URISyntaxException e) { - s_logger.error("Invalid storage URL format ", e); + logger.error("Invalid storage URL format ", e); throw new CloudRuntimeException("Unable to create mount point due to invalid storage URL format " + storageUrl); } mountPoint = mount(uri.getHost() + ":" + uri.getPath(), _mountParent); if (mountPoint == null) { - s_logger.error("Unable to create mount point for " + storageUrl); + logger.error("Unable to create mount point for " + storageUrl); return "/mnt/sec"; // throw new CloudRuntimeException("Unable to create mount point for " + storageUrl); } @@ -692,14 +690,14 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw break; } } - s_logger.error("Unable to create mount: " + mntPt); + 
logger.error("Unable to create mount: " + mntPt); } return mountPoint; } private void startupCleanup(String parent) { - s_logger.info("Cleanup mounted NFS mount points used in previous session"); + logger.info("Cleanup mounted NFS mount points used in previous session"); long mshostId = ManagementServerNode.getManagementServerId(); @@ -707,14 +705,14 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw List mounts = _storage.listMountPointsByMsHost(parent, mshostId); if (mounts != null && !mounts.isEmpty()) { for (String mountPoint : mounts) { - s_logger.info("umount NFS mount from previous session: " + mountPoint); + logger.info("umount NFS mount from previous session: " + mountPoint); String result = null; - Script command = new Script(true, "umount", _timeout, s_logger); + Script command = new Script(true, "umount", _timeout, logger); command.add(mountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Unable to umount " + mountPoint + " due to " + result); + logger.warn("Unable to umount " + mountPoint + " due to " + result); } File file = new File(mountPoint); if (file.exists()) { @@ -725,17 +723,17 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw } private void shutdownCleanup() { - s_logger.info("Cleanup mounted NFS mount points used in current session"); + logger.info("Cleanup mounted NFS mount points used in current session"); for (String mountPoint : _storageMounts.values()) { - s_logger.info("umount NFS mount: " + mountPoint); + logger.info("umount NFS mount: " + mountPoint); String result = null; - Script command = new Script(true, "umount", _timeout, s_logger); + Script command = new Script(true, "umount", _timeout, logger); command.add(mountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Unable to umount " + mountPoint + " due to " + result); + logger.warn("Unable to umount " + mountPoint + " due to " + result); } File file = new 
File(mountPoint); if (file.exists()) { @@ -747,13 +745,13 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw protected String mount(String path, String parent) { String mountPoint = setupMountPoint(parent); if (mountPoint == null) { - s_logger.warn("Unable to create a mount point"); + logger.warn("Unable to create a mount point"); return null; } Script script = null; String result = null; - Script command = new Script(true, "mount", _timeout, s_logger); + Script command = new Script(true, "mount", _timeout, logger); command.add("-t", "nfs"); // command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0"); if ("Mac OS X".equalsIgnoreCase(System.getProperty("os.name"))) { @@ -763,7 +761,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw command.add(mountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Unable to mount " + path + " due to " + result); + logger.warn("Unable to mount " + path + " due to " + result); File file = new File(mountPoint); if (file.exists()) { file.delete(); @@ -772,11 +770,11 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw } // Change permissions for the mountpoint - script = new Script(true, "chmod", _timeout, s_logger); + script = new Script(true, "chmod", _timeout, logger); script.add("1777", mountPoint); result = script.execute(); if (result != null) { - s_logger.warn("Unable to set permissions for " + mountPoint + " due to " + result); + logger.warn("Unable to set permissions for " + mountPoint + " due to " + result); } return mountPoint; } @@ -844,8 +842,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw protected final static int DEFAULT_DOMR_SSHPORT = 3922; protected boolean shutdownRouterVM(DomainRouterVO router) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Try to shutdown router VM " + router.getInstanceName() + " directly."); + if 
(logger.isDebugEnabled()) { + logger.debug("Try to shutdown router VM " + router.getInstanceName() + " directly."); } Pair result; @@ -853,15 +851,15 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw result = SshHelper.sshExecute(router.getPrivateIpAddress(), DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, "poweroff -f"); if (!result.first()) { - s_logger.debug("Unable to shutdown " + router.getInstanceName() + " directly"); + logger.debug("Unable to shutdown " + router.getInstanceName() + " directly"); return false; } } catch (Throwable e) { - s_logger.warn("Unable to shutdown router " + router.getInstanceName() + " directly."); + logger.warn("Unable to shutdown router " + router.getInstanceName() + " directly."); return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Shutdown router " + router.getInstanceName() + " successful."); + if (logger.isDebugEnabled()) { + logger.debug("Shutdown router " + router.getInstanceName() + " successful."); } return true; } @@ -915,11 +913,11 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw long vsmId = 0; if (vsmMapVO != null) { vsmId = vsmMapVO.getVsmId(); - s_logger.info("vsmId is " + vsmId); + logger.info("vsmId is " + vsmId); nexusVSM = _nexusDao.findById(vsmId); - s_logger.info("Fetching nexus vsm credentials from database."); + logger.info("Fetching nexus vsm credentials from database."); } else { - s_logger.info("Found empty vsmMapVO."); + logger.info("Found empty vsmMapVO."); return null; } @@ -928,7 +926,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw nexusVSMCredentials.put("vsmip", nexusVSM.getipaddr()); nexusVSMCredentials.put("vsmusername", nexusVSM.getUserName()); nexusVSMCredentials.put("vsmpassword", nexusVSM.getPassword()); - s_logger.info("Successfully fetched the credentials of Nexus VSM."); + logger.info("Successfully fetched the credentials of Nexus VSM."); } return 
nexusVSMCredentials; } @@ -996,7 +994,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw Long associatedVmwareDcId = vmwareDcZoneMap.getVmwareDcId(); VmwareDatacenterVO associatedVmwareDc = _vmwareDcDao.findById(associatedVmwareDcId); if (associatedVmwareDc.getVcenterHost().equalsIgnoreCase(vCenterHost) && associatedVmwareDc.getVmwareDatacenterName().equalsIgnoreCase(vmwareDcName)) { - s_logger.info("Ignoring API call addVmwareDc, because VMware DC " + vCenterHost + "/" + vmwareDcName + + logger.info("Ignoring API call addVmwareDc, because VMware DC " + vCenterHost + "/" + vmwareDcName + " is already associated with specified zone with id " + zoneId); return associatedVmwareDc; } else { @@ -1030,7 +1028,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw dcMor = dcMo.getMor(); if (dcMor == null) { String msg = "Unable to find VMware DC " + vmwareDcName + " in vCenter " + vCenterHost + ". "; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -1127,7 +1125,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw dcMo = new DatacenterMO(context, vmwareDcName); } catch (Throwable t) { String msg = "Unable to find DC " + vmwareDcName + " in vCenter " + vCenterHost; - s_logger.error(msg); + logger.error(msg); throw new DiscoveryException(msg); } @@ -1135,10 +1133,10 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw // Reset custom field property cloud.zone over this DC dcMo.setCustomFieldValue(CustomFieldConstants.CLOUD_ZONE, "false"); - s_logger.info("Sucessfully reset custom field property cloud.zone over DC " + vmwareDcName); + logger.info("Sucessfully reset custom field property cloud.zone over DC " + vmwareDcName); } catch (Exception e) { String msg = "Unable to reset custom field property cloud.zone over DC " + vmwareDcName + " due to : " + VmwareHelper.getExceptionMessage(e); - 
s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } finally { if (context != null) { @@ -1159,8 +1157,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw if (isLegacyZone(zoneId)) { throw new InvalidParameterValueException("The specified zone is legacy zone. Adding VMware datacenter to legacy zone is not supported."); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("The specified zone is not legacy zone."); + if (logger.isTraceEnabled()) { + logger.trace("The specified zone is not legacy zone."); } } } @@ -1220,11 +1218,11 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw vsmMapVo = _vsmMapDao.findByClusterId(clusterId); if (vsmMapVo == null) { - s_logger.info("There is no instance of Nexus 1000v VSM associated with this cluster [Id:" + clusterId + "] yet."); + logger.info("There is no instance of Nexus 1000v VSM associated with this cluster [Id:" + clusterId + "] yet."); return false; } else { - s_logger.info("An instance of Nexus 1000v VSM [Id:" + vsmMapVo.getVsmId() + "] associated with this cluster [Id:" + clusterId + "]"); + logger.info("An instance of Nexus 1000v VSM [Id:" + vsmMapVo.getVsmId() + "] associated with this cluster [Id:" + clusterId + "]"); return true; } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java index 136e44261b5..beac489acdb 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.dc.ClusterDetailsDao; @@ -65,7 +64,6 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { @Inject 
PortProfileDao _ppDao; - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalLoadBalancerDeviceManagerImpl.class); @DB //public CiscoNexusVSMDeviceVO addCiscoNexusVSM(long clusterId, String ipaddress, String username, String password, ServerResource resource, String vsmName) { @@ -107,7 +105,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { netconfClient = new NetconfHelper(ipaddress, username, password); } catch (CloudRuntimeException e) { String msg = "Failed to connect to Nexus VSM " + ipaddress + " with credentials of user " + username; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -203,7 +201,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { if (hosts != null && hosts.size() > 0) { for (Host host : hosts) { if (host.getType() == Host.Type.Routing) { - s_logger.info("Non-empty cluster with id" + clusterId + "still has a host that uses this VSM. Please empty the cluster first"); + logger.info("Non-empty cluster with id" + clusterId + "still has a host that uses this VSM. Please empty the cluster first"); throw new ResourceInUseException("Non-empty cluster with id" + clusterId + "still has a host that uses this VSM. Please empty the cluster first"); } @@ -267,7 +265,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { public CiscoNexusVSMDeviceVO getCiscoVSMbyClusId(long clusterId) { ClusterVSMMapVO mapVO = _clusterVSMDao.findByClusterId(clusterId); if (mapVO == null) { - s_logger.info("Couldn't find a VSM associated with the specified cluster Id"); + logger.info("Couldn't find a VSM associated with the specified cluster Id"); return null; } // Else, pull out the VSM associated with the VSM id in mapVO. 
diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java index 69517793dff..5b0371466f2 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.CiscoNexusVSMDeviceVO; @@ -34,7 +33,6 @@ import com.cloud.utils.db.SearchCriteria.Op; @Local(value = CiscoNexusVSMDeviceDao.class) @DB public class CiscoNexusVSMDeviceDaoImpl extends GenericDaoBase implements CiscoNexusVSMDeviceDao { - protected static final Logger s_logger = Logger.getLogger(CiscoNexusVSMDeviceDaoImpl.class); final SearchBuilder mgmtVlanIdSearch; final SearchBuilder domainIdSearch; final SearchBuilder nameSearch; diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java b/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java index 81e4f8637ed..56bc8161982 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java @@ -25,7 +25,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.api.commands.DeleteCiscoNexusVSMCmd; import com.cloud.api.commands.DisableCiscoNexusVSMCmd; @@ -71,7 +70,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkElement.class) public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl implements CiscoNexusVSMElementService, NetworkElement, Manager { - private static final Logger s_logger = Logger.getLogger(CiscoNexusVSMElement.class); @Inject CiscoNexusVSMDeviceDao 
_vsmDao; @@ -148,7 +146,7 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl impleme try { result = deleteCiscoNexusVSM(cmd.getCiscoNexusVSMDeviceId()); } catch (ResourceInUseException e) { - s_logger.info("VSM could not be deleted"); + logger.info("VSM could not be deleted"); // TODO: Throw a better exception here. throw new CloudRuntimeException("Failed to delete specified VSM"); } @@ -267,7 +265,7 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl impleme netconfClient.disconnect(); } catch (CloudRuntimeException e) { String msg = "Invalid credentials supplied for user " + vsmUser + " for Cisco Nexus 1000v VSM at " + vsmIp; - s_logger.error(msg); + logger.error(msg); _clusterDao.remove(clusterId); throw new CloudRuntimeException(msg); } @@ -277,7 +275,7 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl impleme if (vsm != null) { List clusterList = _clusterVSMDao.listByVSMId(vsm.getId()); if (clusterList != null && !clusterList.isEmpty()) { - s_logger.error("Failed to add cluster: specified Nexus VSM is already associated with another cluster"); + logger.error("Failed to add cluster: specified Nexus VSM is already associated with another cluster"); ResourceInUseException ex = new ResourceInUseException("Failed to add cluster: specified Nexus VSM is already associated with another cluster with specified Id"); // get clusterUuid to report error @@ -322,7 +320,7 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl impleme msg += "vsmpassword: Password of user account with admin privileges over Cisco Nexus 1000v dvSwitch. "; } } - s_logger.error(msg); + logger.error(msg); // Cleaning up the cluster record as addCluster operation failed because of invalid credentials of Nexus dvSwitch. 
_clusterDao.remove(clusterId); throw new CloudRuntimeException(msg); diff --git a/plugins/hypervisors/xenserver/src/com/cloud/ha/XenServerFencer.java b/plugins/hypervisors/xenserver/src/com/cloud/ha/XenServerFencer.java index 28cba2b98c4..7de794132d6 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/ha/XenServerFencer.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/ha/XenServerFencer.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -42,7 +41,6 @@ import com.cloud.vm.VirtualMachine; @Local(value = FenceBuilder.class) public class XenServerFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(XenServerFencer.class); @Inject HostDao _hostDao; @@ -54,7 +52,7 @@ public class XenServerFencer extends AdapterBase implements FenceBuilder { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.XenServer) { - s_logger.debug("Don't know how to fence non XenServer hosts " + host.getHypervisorType()); + logger.debug("Don't know how to fence non XenServer hosts " + host.getHypervisorType()); return null; } @@ -73,18 +71,18 @@ public class XenServerFencer extends AdapterBase implements FenceBuilder { try { Answer ans = _agentMgr.send(h.getId(), fence); if (!(ans instanceof FenceAnswer)) { - s_logger.debug("Answer is not fenceanswer. Result = " + ans.getResult() + "; Details = " + ans.getDetails()); + logger.debug("Answer is not fenceanswer. 
Result = " + ans.getResult() + "; Details = " + ans.getDetails()); continue; } answer = (FenceAnswer)ans; } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); } continue; } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable"); } continue; } @@ -94,8 +92,8 @@ public class XenServerFencer extends AdapterBase implements FenceBuilder { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); } return false; diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java index 3f1e52e6ddd..3011914b770 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java @@ -83,7 +83,6 @@ import com.xensource.xenapi.Types.UuidInvalid; import com.xensource.xenapi.Types.XenAPIException; import org.apache.cloudstack.hypervisor.xenserver.XenserverConfigs; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import javax.ejb.Local; @@ -104,7 +103,6 @@ import java.util.Set; @Local(value = Discoverer.class) public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter 
{ - private static final Logger s_logger = Logger.getLogger(XcpServerDiscoverer.class); protected String _publicNic; protected String _privateNic; protected String _storageNic1; @@ -163,16 +161,16 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L for(HostPatch patch : patches) { PoolPatch pp = patch.getPoolPatch(conn); if (pp != null && pp.equals(poolPatch) && patch.getApplied(conn)) { - s_logger.debug("host " + hostIp + " does have " + hotFixUuid +" Hotfix."); + logger.debug("host " + hostIp + " does have " + hotFixUuid +" Hotfix."); return true; } } } return false; } catch (UuidInvalid e) { - s_logger.debug("host " + hostIp + " doesn't have " + hotFixUuid + " Hotfix"); + logger.debug("host " + hostIp + " doesn't have " + hotFixUuid + " Hotfix"); } catch (Exception e) { - s_logger.debug("can't get patches information, consider it doesn't have " + hotFixUuid + " Hotfix"); + logger.debug("can't get patches information, consider it doesn't have " + hotFixUuid + " Hotfix"); } return false; } @@ -186,25 +184,25 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L Connection conn = null; if (!url.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of the discovery for this: " + url; - s_logger.debug(msg); + logger.debug(msg); return null; } if (clusterId == null) { String msg = "must specify cluster Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } if (podId == null) { String msg = "must specify pod Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || cluster.getHypervisorType() != HypervisorType.XenServer) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for XenServer hypervisors"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not 
for XenServer hypervisors"); return null; } @@ -217,7 +215,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L conn = _connPool.getConnect(hostIp, username, pass); if (conn == null) { String msg = "Unable to get a connection to " + url; - s_logger.debug(msg); + logger.debug(msg); throw new DiscoveryException(msg); } @@ -243,7 +241,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L if (!clu.getGuid().equals(poolUuid)) { String msg = "Please join the host " + hostIp + " to XS pool " + clu.getGuid() + " through XC/XS before adding it through CS UI"; - s_logger.warn(msg); + logger.warn(msg); throw new DiscoveryException(msg); } } else { @@ -255,7 +253,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L try { Session.logout(conn); } catch (Exception e) { - s_logger.debug("Caught exception during logout", e); + logger.debug("Caught exception during logout", e); } conn.dispose(); conn = null; @@ -278,7 +276,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L if (!support_hvm) { String msg = "Unable to add host " + record.address + " because it doesn't support hvm"; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, msg, msg); - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } } @@ -299,12 +297,12 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L String hostKernelVer = record.softwareVersion.get("linux"); if (_resourceMgr.findHostByGuid(record.uuid) != null) { - s_logger.debug("Skipping " + record.address + " because " + record.uuid + " is already in the database."); + logger.debug("Skipping " + record.address + " because " + record.uuid + " is already in the database."); continue; } CitrixResourceBase resource = createServerResource(dcId, podId, record, latestHotFix); - s_logger.info("Found host " + record.hostname + " ip=" + record.address + " product 
version=" + prodVersion); + logger.info("Found host " + record.hostname + " ip=" + record.address + " product version=" + prodVersion); Map details = new HashMap(); Map params = new HashMap(); @@ -355,7 +353,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L resource.configure("XenServer", params); } catch (ConfigurationException e) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + record.address, "Error is " + e.getMessage()); - s_logger.warn("Unable to instantiate " + record.address, e); + logger.warn("Unable to instantiate " + record.address, e); continue; } resource.start(); @@ -364,16 +362,16 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L } catch (SessionAuthenticationFailed e) { throw new DiscoveredWithErrorException("Authentication error"); } catch (XenAPIException e) { - s_logger.warn("XenAPI exception", e); + logger.warn("XenAPI exception", e); return null; } catch (XmlRpcException e) { - s_logger.warn("Xml Rpc Exception", e); + logger.warn("Xml Rpc Exception", e); return null; } catch (UnknownHostException e) { - s_logger.warn("Unable to resolve the host name", e); + logger.warn("Unable to resolve the host name", e); return null; } catch (Exception e) { - s_logger.debug("other exceptions: " + e.toString(), e); + logger.debug("other exceptions: " + e.toString(), e); return null; } return resources; @@ -433,7 +431,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L String msg = "Only support XCP 1.0.0, 1.1.0, 1.4.x, 1.5 beta, 1.6.x; XenServer 5.6, XenServer 5.6 FP1, XenServer 5.6 SP2, Xenserver 6.0, 6.0.2, 6.1.0, 6.2.0, 6.5.0 but this one is " + prodBrand + " " + prodVersion; - s_logger.warn(msg); + logger.warn(msg); throw new RuntimeException(msg); } @@ -553,7 +551,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L StartupRoutingCommand startup = (StartupRoutingCommand)cmd; 
if (startup.getHypervisorType() != HypervisorType.XenServer) { - s_logger.debug("Not XenServer so moving on."); + logger.debug("Not XenServer so moving on."); return; } @@ -565,7 +563,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L _clusterDao.update(cluster.getId(), cluster); } else if (!cluster.getGuid().equals(startup.getPool())) { String msg = "pool uuid for cluster " + cluster.getId() + " changed from " + cluster.getGuid() + " to " + startup.getPool(); - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -579,15 +577,15 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L if (!resource.equals(host.getResource())) { String msg = "host " + host.getPrivateIpAddress() + " changed from " + host.getResource() + " to " + resource; - s_logger.debug(msg); + logger.debug(msg); host.setResource(resource); host.setSetup(false); _hostDao.update(agentId, host); throw new HypervisorVersionChangedException(msg); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Setting up host " + agentId); + if (logger.isDebugEnabled()) { + logger.debug("Setting up host " + agentId); } HostEnvironment env = new HostEnvironment(); @@ -611,12 +609,12 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L } return; } else { - s_logger.warn("Unable to setup agent " + agentId + " due to " + ((answer != null) ? answer.getDetails() : "return null")); + logger.warn("Unable to setup agent " + agentId + " due to " + ((answer != null) ? 
answer.getDetails() : "return null")); } } catch (AgentUnavailableException e) { - s_logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); + logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); } catch (OperationTimedoutException e) { - s_logger.warn("Unable to setup agent " + agentId + " because it timed out", e); + logger.warn("Unable to setup agent " + agentId + " because it timed out", e); } throw new ConnectionException(true, "Reinitialize agent after setup."); } @@ -656,7 +654,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L HostPodVO pod = _podDao.findById(host.getPodId()); DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); - s_logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.XenServer + ". Checking CIDR..."); + logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.XenServer + ". Checking CIDR..."); _resourceMgr.checkCIDR(pod, dc, ssCmd.getPrivateIpAddress(), ssCmd.getPrivateNetmask()); return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.XenServer, details, hostTags); } diff --git a/plugins/network-elements/bigswitch/src/com/cloud/network/element/BigSwitchBcfElement.java b/plugins/network-elements/bigswitch/src/com/cloud/network/element/BigSwitchBcfElement.java index dba9de2be8c..5311ff5a16c 100644 --- a/plugins/network-elements/bigswitch/src/com/cloud/network/element/BigSwitchBcfElement.java +++ b/plugins/network-elements/bigswitch/src/com/cloud/network/element/BigSwitchBcfElement.java @@ -30,7 +30,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.commons.net.util.SubnetUtils; @@ -132,7 +131,6 @@ import 
com.cloud.vm.dao.VMInstanceDao; public class BigSwitchBcfElement extends AdapterBase implements BigSwitchBcfElementService, ConnectivityProvider, IpDeployer, SourceNatServiceProvider, StaticNatServiceProvider, NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(BigSwitchBcfElement.class); private static final Map> capabilities = setCapabilities(); @@ -198,18 +196,18 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { } private boolean canHandle(Network network, Service service) { - s_logger.debug("Checking if BigSwitchBcfElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug("Checking if BigSwitchBcfElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Vlan) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("BigSwitchBcfElement is not a provider for network " + network.getDisplayText()); + logger.debug("BigSwitchBcfElement is not a provider for network " + network.getDisplayText()); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, BcfConstants.BIG_SWITCH_BCF)) { - s_logger.debug("BigSwitchBcfElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug("BigSwitchBcfElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); return false; } @@ -302,7 +300,7 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri"); + logger.error("Nic has no broadcast Uri"); return false; } @@ -360,7 +358,7 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { @Override public boolean 
verifyServicesCombination(Set services) { if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to provide services without Connectivity service enabled for this element"); + logger.warn("Unable to provide services without Connectivity service enabled for this element"); return false; } return true; @@ -646,14 +644,14 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { String dstIp = rule.getDestIpAddress(); String mac = rule.getSourceMacAddress(); if(!rule.isForRevoke()) { - s_logger.debug("BCF enables static NAT for public IP: " + srcIp + " private IP " + dstIp + logger.debug("BCF enables static NAT for public IP: " + srcIp + " private IP " + dstIp + " mac " + mac); CreateBcfStaticNatCommand cmd = new CreateBcfStaticNatCommand( tenantId, network.getUuid(), dstIp, srcIp, mac); _bcfUtils.sendBcfCommandWithNetworkSyncCheck(cmd, network); } else { - s_logger.debug("BCF removes static NAT for public IP: " + srcIp + " private IP " + dstIp + logger.debug("BCF removes static NAT for public IP: " + srcIp + " private IP " + dstIp + " mac " + mac); DeleteBcfStaticNatCommand cmd = new DeleteBcfStaticNatCommand(tenantId, srcIp); diff --git a/plugins/network-elements/bigswitch/src/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java b/plugins/network-elements/bigswitch/src/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java index e205f47ef15..2d13b4271b1 100644 --- a/plugins/network-elements/bigswitch/src/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java +++ b/plugins/network-elements/bigswitch/src/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.CreateBcfAttachmentCommand; @@ -92,7 +91,6 @@ import com.cloud.vm.dao.VMInstanceDao; */ @Local(value = NetworkGuru.class) public 
class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigrationResponder { - private static final Logger s_logger = Logger.getLogger(BigSwitchBcfGuestNetworkGuru.class); @Inject PhysicalNetworkDao _physicalNetworkDao; @@ -140,7 +138,7 @@ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements Ne isMyIsolationMethod(physicalNetwork)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -150,21 +148,21 @@ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements Ne // Check if the isolation type of the physical network is BCF_SEGMENT, then delegate GuestNetworkGuru to design PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); if (physnet == null || physnet.getIsolationMethods() == null || !physnet.getIsolationMethods().contains("BCF_SEGMENT")) { - s_logger.debug("Refusing to design this network, the physical isolation type is not BCF_SEGMENT"); + logger.debug("Refusing to design this network, the physical isolation type is not BCF_SEGMENT"); return null; } List devices = _bigswitchBcfDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - s_logger.error("No BigSwitch Controller on physical network " + physnet.getName()); + logger.error("No BigSwitch Controller on physical network " + physnet.getName()); return null; } for (BigSwitchBcfDeviceVO d: devices){ - s_logger.debug("BigSwitch Controller " + d.getUuid() + logger.debug("BigSwitch Controller " + d.getUuid() + " found on physical network " + physnet.getId()); } - s_logger.debug("Physical isolation type is BCF_SEGMENT, asking GuestNetworkGuru to design this network"); + logger.debug("Physical isolation type is BCF_SEGMENT, asking GuestNetworkGuru to 
design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, owner); if (networkObject == null) { return null; @@ -312,7 +310,7 @@ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements Ne public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vlan || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } @@ -356,7 +354,7 @@ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements Ne tenantId = vpc.getUuid(); tenantName = vpc.getName(); boolean released = _vpcDao.releaseFromLockTable(vpc.getId()); - s_logger.debug("BCF guru release lock vpc id: " + vpc.getId() + logger.debug("BCF guru release lock vpc id: " + vpc.getId() + " released? 
" + released); } else { // use network id in CS as tenant in BSN @@ -402,14 +400,14 @@ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements Ne public void rollbackMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { - s_logger.debug("BCF guru rollback migration"); + logger.debug("BCF guru rollback migration"); } @Override public void commitMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { - s_logger.debug("BCF guru commit migration"); + logger.debug("BCF guru commit migration"); } private void bcfUtilsInit(){ diff --git a/plugins/network-elements/bigswitch/src/com/cloud/network/resource/BigSwitchBcfResource.java b/plugins/network-elements/bigswitch/src/com/cloud/network/resource/BigSwitchBcfResource.java index a43cad31371..dcc8326bb98 100644 --- a/plugins/network-elements/bigswitch/src/com/cloud/network/resource/BigSwitchBcfResource.java +++ b/plugins/network-elements/bigswitch/src/com/cloud/network/resource/BigSwitchBcfResource.java @@ -25,7 +25,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -70,7 +69,6 @@ import com.cloud.resource.ServerResource; import com.cloud.utils.component.ManagerBase; public class BigSwitchBcfResource extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BigSwitchBcfResource.class); private String _name; private String _guid; @@ -176,20 +174,20 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource try{ executeRequest(new SyncBcfTopologyCommand(true, true), _numRetries); } catch(Exception e){ - s_logger.error("BigSwitch BCF sync error", e); + logger.error("BigSwitch BCF sync error", e); } } else { try{ executeRequest(new SyncBcfTopologyCommand(true, false), _numRetries); } 
catch (Exception e){ - s_logger.error("BigSwitch BCF sync error", e); + logger.error("BigSwitch BCF sync error", e); } } } try { ControlClusterStatus ccs = _bigswitchBcfApi.getControlClusterStatus(); if (!ccs.getStatus()) { - s_logger.error("ControlCluster state is not ready: " + ccs.getStatus()); + logger.error("ControlCluster state is not ready: " + ccs.getStatus()); return null; } if (ccs.isTopologySyncRequested()) { @@ -200,11 +198,11 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource executeRequest(new SyncBcfTopologyCommand(true, false), _numRetries); } } else { - s_logger.debug("topology sync needed but no topology history"); + logger.debug("topology sync needed but no topology history"); } } } catch (BigSwitchBcfApiException e) { - s_logger.error("getControlClusterStatus failed", e); + logger.error("getControlClusterStatus failed", e); return null; } try { @@ -222,7 +220,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource } } catch (BigSwitchBcfApiException e) { - s_logger.error("getCapabilities failed", e); + logger.error("getCapabilities failed", e); } return new PingCommand(Host.Type.L2Networking, id); } @@ -274,7 +272,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource } else if (cmd instanceof GetControllerDataCommand) { return executeRequest((GetControllerDataCommand)cmd, numRetries); } - s_logger.debug("Received unsupported command " + cmd.toString()); + logger.debug("Received unsupported command " + cmd.toString()); return Answer.createUnsupportedCommandAnswer(cmd); } @@ -575,7 +573,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource } private Answer retry(Command cmd, int numRetries) { - s_logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". Number of retries remaining: " + numRetries); + logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". 
Number of retries remaining: " + numRetries); return executeRequest(cmd, numRetries); } diff --git a/plugins/network-elements/brocade-vcs/src/com/cloud/network/element/BrocadeVcsElement.java b/plugins/network-elements/brocade-vcs/src/com/cloud/network/element/BrocadeVcsElement.java index b551092300e..29e4d60c813 100644 --- a/plugins/network-elements/brocade-vcs/src/com/cloud/network/element/BrocadeVcsElement.java +++ b/plugins/network-elements/brocade-vcs/src/com/cloud/network/element/BrocadeVcsElement.java @@ -30,7 +30,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -94,7 +93,6 @@ import com.cloud.vm.dao.NicDao; @Component @Local(value = {NetworkElement.class}) public class BrocadeVcsElement extends AdapterBase implements NetworkElement, ResourceStateAdapter, BrocadeVcsElementService { - private static final Logger s_logger = Logger.getLogger(BrocadeVcsElement.class); private static final Map> capabilities = setCapabilities(); @@ -140,18 +138,18 @@ public class BrocadeVcsElement extends AdapterBase implements NetworkElement, Re } protected boolean canHandle(Network network, Service service) { - s_logger.debug("Checking if BrocadeVcsElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug("Checking if BrocadeVcsElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Vcs) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("BrocadeVcsElement is not a provider for network " + network.getDisplayText()); + logger.debug("BrocadeVcsElement is not a provider for network " + 
network.getDisplayText()); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.BrocadeVcs)) { - s_logger.debug("BrocadeVcsElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug("BrocadeVcsElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); return false; } @@ -168,7 +166,7 @@ public class BrocadeVcsElement extends AdapterBase implements NetworkElement, Re @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("entering BrocadeVcsElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug("entering BrocadeVcsElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); if (!canHandle(network, Service.Connectivity)) { return false; @@ -236,7 +234,7 @@ public class BrocadeVcsElement extends AdapterBase implements NetworkElement, Re public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to provide services without Connectivity service enabled for this element"); + logger.warn("Unable to provide services without Connectivity service enabled for this element"); return false; } return true; diff --git a/plugins/network-elements/brocade-vcs/src/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java b/plugins/network-elements/brocade-vcs/src/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java index 92bf113f9a3..c079a9e7279 100644 --- a/plugins/network-elements/brocade-vcs/src/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java +++ b/plugins/network-elements/brocade-vcs/src/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java @@ 
-21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.AssociateMacToNetworkAnswer; @@ -65,7 +64,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkGuru.class) public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(BrocadeVcsGuestNetworkGuru.class); @Inject NetworkOfferingServiceMapDao _ntwkOfferingSrvcDao; @@ -94,7 +92,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { && isMyIsolationMethod(physicalNetwork) && _ntwkOfferingSrvcDao.areServicesSupportedByNetworkOffering(offering.getId(), Service.Connectivity)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -105,10 +103,10 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } - s_logger.debug("Physical isolation type is VCS, asking GuestNetworkGuru to design this network"); + logger.debug("Physical isolation type is VCS, asking GuestNetworkGuru to design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, owner); if (networkObject == null) { return null; @@ -133,7 +131,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { List devices = _brocadeVcsDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - 
s_logger.error("No Brocade VCS Switch on physical network " + physicalNetworkId); + logger.error("No Brocade VCS Switch on physical network " + physicalNetworkId); return null; } @@ -145,8 +143,8 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { CreateNetworkAnswer answer = (CreateNetworkAnswer)_agentMgr.easySend(brocadeVcsHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("CreateNetworkCommand failed"); - s_logger.error("Unable to create network " + network.getId()); + logger.error("CreateNetworkCommand failed"); + logger.error("Unable to create network " + network.getId()); return null; } @@ -170,7 +168,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { List devices = _brocadeVcsDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); + logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); return; } for (BrocadeVcsDeviceVO brocadeVcsDevice : devices) { @@ -182,7 +180,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { AssociateMacToNetworkAnswer answer = (AssociateMacToNetworkAnswer)_agentMgr.easySend(brocadeVcsHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("AssociateMacToNetworkCommand failed"); + logger.error("AssociateMacToNetworkCommand failed"); throw new InsufficientVirtualNetworkCapacityException("Unable to associate mac " + interfaceMac + " to network " + network.getId(), DataCenter.class, dc.getId()); } } @@ -196,7 +194,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { List devices = _brocadeVcsDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); + logger.error("No Brocade VCS Switch on physical network " + 
network.getPhysicalNetworkId()); return; } for (BrocadeVcsDeviceVO brocadeVcsDevice : devices) { @@ -207,8 +205,8 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { DisassociateMacFromNetworkAnswer answer = (DisassociateMacFromNetworkAnswer)_agentMgr.easySend(brocadeVcsHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DisassociateMacFromNetworkCommand failed"); - s_logger.error("Unable to disassociate mac " + interfaceMac + " from network " + network.getId()); + logger.error("DisassociateMacFromNetworkCommand failed"); + logger.error("Unable to disassociate mac " + interfaceMac + " from network " + network.getId()); return; } } @@ -236,13 +234,13 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { if (brocadeVcsNetworkVlanMapping != null) { vlanTag = brocadeVcsNetworkVlanMapping.getVlanId(); } else { - s_logger.error("Not able to find vlanId for network " + network.getId()); + logger.error("Not able to find vlanId for network " + network.getId()); return false; } List devices = _brocadeVcsDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); + logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); return false; } for (BrocadeVcsDeviceVO brocadeVcsDevice : devices) { @@ -253,8 +251,8 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { DeleteNetworkAnswer answer = (DeleteNetworkAnswer)_agentMgr.easySend(brocadeVcsHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DeleteNetworkCommand failed"); - s_logger.error("Unable to delete network " + network.getId()); + logger.error("DeleteNetworkCommand failed"); + logger.error("Unable to delete network " + network.getId()); return false; } } diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java 
b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java index d4d3c8dd0a6..8a9d0f54248 100644 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java @@ -32,7 +32,6 @@ import javax.persistence.EntityExistsException; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -137,7 +136,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkElement.class) public class CiscoVnmcElement extends AdapterBase implements SourceNatServiceProvider, FirewallServiceProvider, PortForwardingServiceProvider, IpDeployer, StaticNatServiceProvider, ResourceStateAdapter, NetworkElement, CiscoVnmcElementService, CiscoAsa1000vService { - private static final Logger s_logger = Logger.getLogger(CiscoVnmcElement.class); private static final Map> capabilities = setCapabilities(); @Inject @@ -274,7 +272,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro final DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); if (zone.getNetworkType() == NetworkType.Basic) { - s_logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); + logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); return false; } @@ -284,24 +282,24 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro final List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network " + 
network.getName()); return false; } List asaList = _ciscoAsa1000vDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (asaList.isEmpty()) { - s_logger.debug("No Cisco ASA 1000v device on network " + network.getName()); + logger.debug("No Cisco ASA 1000v device on network " + network.getName()); return false; } NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork != null) { - s_logger.debug("Cisco ASA 1000v device already associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device already associated with network " + network.getName()); return true; } if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.CiscoVnmc)) { - s_logger.error("SourceNat service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("SourceNat service is not provided by Cisco Vnmc device on network " + network.getName()); return false; } @@ -309,20 +307,20 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro // ensure that there is an ASA 1000v assigned to this network CiscoAsa1000vDevice assignedAsa = assignAsa1000vToNetwork(network); if (assignedAsa == null) { - s_logger.error("Unable to assign ASA 1000v device to network " + network.getName()); + logger.error("Unable to assign ASA 1000v device to network " + network.getName()); throw new CloudRuntimeException("Unable to assign ASA 1000v device to network " + network.getName()); } ClusterVO asaCluster = _clusterDao.findById(assignedAsa.getClusterId()); ClusterVSMMapVO clusterVsmMap = _clusterVsmMapDao.findByClusterId(assignedAsa.getClusterId()); if (clusterVsmMap == null) { - s_logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); + logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); throw new CloudRuntimeException("Vmware cluster 
" + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); } CiscoNexusVSMDeviceVO vsmDevice = _vsmDeviceDao.findById(clusterVsmMap.getVsmId()); if (vsmDevice == null) { - s_logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); + logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); throw new CloudRuntimeException("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); } @@ -357,14 +355,14 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro long callerUserId = CallContext.current().getCallingUserId(); outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUserId, zone, true); } catch (ResourceAllocationException e) { - s_logger.error("Unable to allocate additional public Ip address. Exception details " + e); + logger.error("Unable to allocate additional public Ip address. Exception details " + e); throw new CloudRuntimeException("Unable to allocate additional public Ip address. Exception details " + e); } try { outsideIp = _ipAddrMgr.associateIPToGuestNetwork(outsideIp.getId(), network.getId(), true); } catch (ResourceAllocationException e) { - s_logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + + logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e); throw new CloudRuntimeException("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". 
Exception details " + e); @@ -377,33 +375,33 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro // all public ip addresses must be from same subnet, this essentially means single public subnet in zone if (!createLogicalEdgeFirewall(vlanId, network.getGateway(), gatewayNetmask, outsideIp.getAddress().addr(), sourceNatIp.getNetmask(), publicGateways, ciscoVnmcHost.getId())) { - s_logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); + logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); throw new CloudRuntimeException("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); } // create stuff in VSM for ASA device if (!configureNexusVsmForAsa(vlanId, network.getGateway(), vsmDevice.getUserName(), vsmDevice.getPassword(), vsmDevice.getipaddr(), assignedAsa.getInPortProfile(), ciscoVnmcHost.getId())) { - s_logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); + logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); throw new CloudRuntimeException("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); } // configure source NAT if (!configureSourceNat(vlanId, network.getCidr(), sourceNatIp, ciscoVnmcHost.getId())) { - s_logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); + logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); throw new CloudRuntimeException("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); } // associate Asa 1000v instance with logical edge firewall if (!associateAsaWithLogicalEdgeFirewall(vlanId, 
assignedAsa.getManagementIp(), ciscoVnmcHost.getId())) { - s_logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + + logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName()); throw new CloudRuntimeException("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName()); } } catch (CloudRuntimeException e) { unassignAsa1000vFromNetwork(network); - s_logger.error("CiscoVnmcElement failed", e); + logger.error("CiscoVnmcElement failed", e); return false; } catch (Exception e) { unassignAsa1000vFromNetwork(network); @@ -479,7 +477,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro @Override public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.Firewall)) { - s_logger.warn("CiscoVnmc must be used as Firewall Service Provider in the network"); + logger.warn("CiscoVnmc must be used as Firewall Service Provider in the network"); return false; } return true; @@ -644,26 +642,26 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro public boolean applyFWRules(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Firewall, Provider.CiscoVnmc)) { - s_logger.error("Firewall service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Firewall service is not provided by Cisco Vnmc device on network " + network.getName()); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device 
on network " + network.getName()); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - s_logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); return true; } if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -690,7 +688,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "Unable to apply firewall rules to Cisco ASA 1000v appliance due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } @@ -702,26 +700,26 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro public boolean applyPFRules(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.PortForwarding, Provider.CiscoVnmc)) { - s_logger.error("Port forwarding service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Port forwarding service is not provided by Cisco Vnmc device on network " + network.getName()); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Cisco Vnmc device on 
network " + network.getName()); + logger.error("No Cisco Vnmc device on network " + network.getName()); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - s_logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); return true; } if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply port forwarding rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply port forwarding rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -745,7 +743,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "Unable to apply port forwarding rules to Cisco ASA 1000v appliance due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } @@ -756,26 +754,26 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro @Override public boolean applyStaticNats(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.StaticNat, Provider.CiscoVnmc)) { - s_logger.error("Static NAT service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Static NAT service is not provided by Cisco Vnmc device on network " + network.getName()); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network " + network.getName()); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - s_logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); return true; } if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply static NAT rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply static NAT rules for network with ID " + network.getId() + "; this network is not implemented. 
Skipping backend commands."); return true; } @@ -800,7 +798,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "Unable to apply static NAT rules to Cisco ASA 1000v appliance due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java index 1d92d07133f..ed94003f7d5 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -25,7 +25,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -58,7 +57,6 @@ import com.cloud.vm.VirtualMachineProfile; @Component @Local(value = NetworkElement.class) public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalancingServiceProvider, IpDeployer { - private static final Logger s_logger = Logger.getLogger(ElasticLoadBalancerElement.class); private static final Map> capabilities = setCapabilities(); @Inject NetworkModel _networkManager; @@ -76,7 +74,7 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan private boolean canHandle(Network network, List rules) { if (network.getGuestType() != Network.GuestType.Shared || network.getTrafficType() != TrafficType.Guest) { - s_logger.debug("Not handling network with type 
" + network.getGuestType() + " and traffic type " + network.getTrafficType()); + logger.debug("Not handling network with type " + network.getGuestType() + " and traffic type " + network.getTrafficType()); return false; } @@ -86,7 +84,7 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan if (schemeCaps != null) { for (LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + this.getName()); + logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + this.getName()); return false; } } diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index b2d651c4f70..b49b41064b8 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRul import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -105,7 +104,6 @@ import com.cloud.vm.dao.NicDao; @Component @Local(value = {ElasticLoadBalancerManager.class}) public class ElasticLoadBalancerManagerImpl extends ManagerBase implements ElasticLoadBalancerManager, VirtualMachineGuru { - private static final Logger s_logger = Logger.getLogger(ElasticLoadBalancerManagerImpl.class); @Inject private AgentManager _agentMgr; @@ -164,7 +162,7 @@ public class 
ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast try { answers = _agentMgr.send(elbVm.getHostId(), cmds); } catch (OperationTimedoutException e) { - s_logger.warn("ELB: Timed Out", e); + logger.warn("ELB: Timed Out", e); throw new AgentUnavailableException("Unable to send commands to virtual elbVm ", elbVm.getHostId(), e); } @@ -251,7 +249,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast DomainRouterVO elbVm = findElbVmForLb(rules.get(0)); if (elbVm == null) { - s_logger.warn("Unable to apply lb rules, ELB vm doesn't exist in the network " + network.getId()); + logger.warn("Unable to apply lb rules, ELB vm doesn't exist in the network " + network.getId()); throw new ResourceUnavailableException("Unable to apply lb rules", DataCenter.class, network.getDataCenterId()); } @@ -271,10 +269,10 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast } return applyLBRules(elbVm, lbRules, network.getId()); } else if (elbVm.getState() == State.Stopped || elbVm.getState() == State.Stopping) { - s_logger.debug("ELB VM is in " + elbVm.getState() + ", so not sending apply LoadBalancing rules commands to the backend"); + logger.debug("ELB VM is in " + elbVm.getState() + ", so not sending apply LoadBalancing rules commands to the backend"); return true; } else { - s_logger.warn("Unable to apply loadbalancing rules, ELB VM is not in the right state " + elbVm.getState()); + logger.warn("Unable to apply loadbalancing rules, ELB VM is not in the right state " + elbVm.getState()); throw new ResourceUnavailableException("Unable to apply loadbalancing rules, ELB VM is not in the right state", VirtualRouter.class, elbVm.getId()); } } @@ -298,13 +296,13 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast // this can sometimes happen, if DB is manually or programmatically manipulated if (offerings == null || offerings.size() < 2) { String msg = "Data integrity problem : System 
Offering For Elastic LB VM has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } String enabled = _configDao.getValue(Config.ElasticLoadBalancerEnabled.key()); _enabled = (enabled == null) ? false : Boolean.parseBoolean(enabled); - s_logger.info("Elastic Load balancer enabled: " + _enabled); + logger.info("Elastic Load balancer enabled: " + _enabled); if (_enabled) { String traffType = _configDao.getValue(Config.ElasticLoadBalancerNetwork.key()); if ("guest".equalsIgnoreCase(traffType)) { @@ -313,11 +311,11 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast _frontendTrafficType = TrafficType.Public; } else throw new ConfigurationException("ELB: Traffic type for front end of load balancer has to be guest or public; found : " + traffType); - s_logger.info("ELB: Elastic Load Balancer: will balance on " + traffType); + logger.info("ELB: Elastic Load Balancer: will balance on " + traffType); int gcIntervalMinutes = NumbersUtil.parseInt(configs.get(Config.ElasticLoadBalancerVmGcInterval.key()), 5); if (gcIntervalMinutes < 5) gcIntervalMinutes = 5; - s_logger.info("ELB: Elastic Load Balancer: scheduling GC to run every " + gcIntervalMinutes + " minutes"); + logger.info("ELB: Elastic Load Balancer: scheduling GC to run every " + gcIntervalMinutes + " minutes"); _gcThreadPool = Executors.newScheduledThreadPool(1, new NamedThreadFactory("ELBVM-GC")); _gcThreadPool.scheduleAtFixedRate(new CleanupThread(), gcIntervalMinutes, gcIntervalMinutes, TimeUnit.MINUTES); _itMgr.registerGuru(VirtualMachine.Type.ElasticLoadBalancerVm, this); @@ -329,7 +327,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast } private DomainRouterVO stop(DomainRouterVO elbVm, boolean forced) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Stopping ELB vm " + elbVm); + logger.debug("Stopping ELB vm " + elbVm); try { _itMgr.advanceStop(elbVm.getUuid(), 
forced); return _routerDao.findById(elbVm.getId()); @@ -348,7 +346,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast List unusedElbVms = _elbVmMapDao.listUnusedElbVms(); if (unusedElbVms != null) { if (unusedElbVms.size() > 0) { - s_logger.info("Found " + unusedElbVms.size() + " unused ELB vms"); + logger.info("Found " + unusedElbVms.size() + " unused ELB vms"); } Set currentGcCandidates = new HashSet(); for (DomainRouterVO elbVm : unusedElbVms) { @@ -361,22 +359,22 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast boolean gceed = false; try { - s_logger.info("Attempting to stop ELB VM: " + elbVm); + logger.info("Attempting to stop ELB VM: " + elbVm); stop(elbVm, true); gceed = true; } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to stop unused ELB vm " + elbVm + " due to ", e); + logger.warn("Unable to stop unused ELB vm " + elbVm + " due to ", e); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to stop unused ELB vm " + elbVm + " due to ", e); + logger.warn("Unable to stop unused ELB vm " + elbVm + " due to ", e); continue; } if (gceed) { try { - s_logger.info("Attempting to destroy ELB VM: " + elbVm); + logger.info("Attempting to destroy ELB VM: " + elbVm); _itMgr.expunge(elbVm.getUuid()); _routerDao.remove(elbVm.getId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to destroy unused ELB vm " + elbVm + " due to ", e); + logger.warn("Unable to destroy unused ELB vm " + elbVm + " due to ", e); gceed = false; } } @@ -446,14 +444,14 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast } else if (nic.getTrafficType() == TrafficType.Control) { // control command is sent over management network in VMware if (dest.getHost().getHypervisorType() == HypervisorType.VMware) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Check if we need to add management server explicit route to ELB vm. 
pod cidr: " + dest.getPod().getCidrAddress() + "/" + if (logger.isInfoEnabled()) { + logger.info("Check if we need to add management server explicit route to ELB vm. pod cidr: " + dest.getPod().getCidrAddress() + "/" + dest.getPod().getCidrSize() + ", pod gateway: " + dest.getPod().getGateway() + ", management host: " + ApiServiceConfiguration.ManagementHostIPAdr.value()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added management server explicit route to ELB vm."); + if (logger.isDebugEnabled()) { + logger.debug("Added management server explicit route to ELB vm."); } // always add management explicit route, for basic networking setup buf.append(" mgmtcidr=").append(_mgmtCidr); @@ -479,8 +477,8 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast buf.append(" dns2=").append(defaultDns2); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Boot Args for " + profile + ": " + buf.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Boot Args for " + profile + ": " + buf.toString()); } if (controlNic == null) { @@ -515,7 +513,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Commands cmds, ReservationContext context) { CheckSshAnswer answer = (CheckSshAnswer)cmds.getAnswer("checkSsh"); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to ssh to the ELB VM: " + (answer != null ? answer.getDetails() : "No answer (answer for \"checkSsh\" was null)")); + logger.warn("Unable to ssh to the ELB VM: " + (answer != null ? 
answer.getDetails() : "No answer (answer for \"checkSsh\" was null)")); return false; } @@ -550,7 +548,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast } if (controlNic == null) { - s_logger.error("Control network doesn't exist for the ELB vm " + elbVm); + logger.error("Control network doesn't exist for the ELB vm " + elbVm); return false; } @@ -568,7 +566,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast lbRules.add(loadBalancing); } - s_logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of ELB vm " + elbVm + " start."); + logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of ELB vm " + elbVm + " start."); if (!lbRules.isEmpty()) { createApplyLoadBalancingRulesCommands(lbRules, elbVm, cmds, guestNetworkId); } diff --git a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java index 1cd6851b582..7668a0f2c09 100644 --- a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java +++ b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java @@ -26,7 +26,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.google.gson.Gson; @@ -97,7 +96,6 @@ import com.cloud.vm.VirtualMachineProfile; public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceManagerImpl implements LoadBalancingServiceProvider, IpDeployer, F5ExternalLoadBalancerElementService, ExternalLoadBalancerDeviceManager { - private static final Logger s_logger = Logger.getLogger(F5ExternalLoadBalancerElement.class); @Inject NetworkModel _networkManager; @@ -125,7 +123,7 @@ public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceMan private boolean canHandle(Network 
config, List rules) { if ((config.getGuestType() != Network.GuestType.Isolated && config.getGuestType() != Network.GuestType.Shared) || config.getTrafficType() != TrafficType.Guest) { - s_logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType()); + logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType()); return false; } @@ -135,7 +133,7 @@ public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceMan if (schemeCaps != null && rules != null && !rules.isEmpty()) { for (LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + this.getName()); + logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + this.getName()); return false; } } @@ -517,8 +515,8 @@ public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceMan public IpDeployer getIpDeployer(Network network) { ExternalLoadBalancerDeviceVO lbDevice = getExternalLoadBalancerForNetwork(network); if (lbDevice == null) { - s_logger.error("Cannot find external load balanacer for network " + network.getName()); - s_logger.error("Make F5 as dummy ip deployer, since we likely met this when clean up resource after shutdown network"); + logger.error("Cannot find external load balanacer for network " + network.getName()); + logger.error("Make F5 as dummy ip deployer, since we likely met this when clean up resource after shutdown network"); return this; } if (_networkManager.isNetworkInlineMode(network)) { diff --git a/plugins/network-elements/globodns/src/com/globo/globodns/cloudstack/element/GloboDnsElement.java b/plugins/network-elements/globodns/src/com/globo/globodns/cloudstack/element/GloboDnsElement.java index 042145bf926..9d9554c1c58 100644 --- 
a/plugins/network-elements/globodns/src/com/globo/globodns/cloudstack/element/GloboDnsElement.java +++ b/plugins/network-elements/globodns/src/com/globo/globodns/cloudstack/element/GloboDnsElement.java @@ -29,7 +29,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -82,7 +81,6 @@ import com.globo.globodns.cloudstack.resource.GloboDnsResource; @Local(NetworkElement.class) public class GloboDnsElement extends AdapterBase implements ResourceStateAdapter, NetworkElement, GloboDnsElementService, Configurable { - private static final Logger s_logger = Logger.getLogger(GloboDnsElement.class); private static final Map> capabilities = setCapabilities(); @@ -134,7 +132,7 @@ public class GloboDnsElement extends AdapterBase implements ResourceStateAdapter throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { if (!isTypeSupported(vm.getType())) { - s_logger.info("GloboDNS only manages records for VMs of type User, ConsoleProxy and DomainRouter. VM " + vm + " is " + vm.getType()); + logger.info("GloboDNS only manages records for VMs of type User, ConsoleProxy and DomainRouter. VM " + vm + " is " + vm.getType()); return false; } @@ -164,7 +162,7 @@ public class GloboDnsElement extends AdapterBase implements ResourceStateAdapter ResourceUnavailableException { if (!isTypeSupported(vm.getType())) { - s_logger.info("GloboDNS only manages records for VMs of type User, ConsoleProxy and DomainRouter. VM " + vm + " is " + vm.getType()); + logger.info("GloboDNS only manages records for VMs of type User, ConsoleProxy and DomainRouter. 
VM " + vm + " is " + vm.getType()); return false; } diff --git a/plugins/network-elements/globodns/src/com/globo/globodns/cloudstack/resource/GloboDnsResource.java b/plugins/network-elements/globodns/src/com/globo/globodns/cloudstack/resource/GloboDnsResource.java index 84c1b5b44e5..9f399a92e0d 100644 --- a/plugins/network-elements/globodns/src/com/globo/globodns/cloudstack/resource/GloboDnsResource.java +++ b/plugins/network-elements/globodns/src/com/globo/globodns/cloudstack/resource/GloboDnsResource.java @@ -21,7 +21,6 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -68,7 +67,6 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { private static final String REVERSE_DOMAIN_SUFFIX = "in-addr.arpa"; private static final String DEFAULT_AUTHORITY_TYPE = "M"; - private static final Logger s_logger = Logger.getLogger(GloboDnsResource.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -125,7 +123,7 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { @Override public StartupCommand[] initialize() { - s_logger.trace("initialize called"); + logger.trace("initialize called"); StartupCommand cmd = new StartupCommand(getType()); cmd.setName(_name); cmd.setGuid(_guid); @@ -197,7 +195,7 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { if (!cmd.isOverride()) { for (Record record : _globoDns.getRecordAPI().listAll(domain.getId())) { if (record.getTypeNSRecordAttributes().getId() == null) { - s_logger.warn("There are records in domain " + cmd.getNetworkDomain() + " and override is not enable. I will not delete this domain."); + logger.warn("There are records in domain " + cmd.getNetworkDomain() + " and override is not enable. 
I will not delete this domain."); return new Answer(cmd, true, "Domain keeped"); } } @@ -205,7 +203,7 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { _globoDns.getDomainAPI().removeDomain(domain.getId()); scheduleExportChangesToBind(); } else { - s_logger.warn("Domain " + cmd.getNetworkDomain() + " already been deleted."); + logger.warn("Domain " + cmd.getNetworkDomain() + " already been deleted."); } return new Answer(cmd, true, "Domain removed"); @@ -246,7 +244,7 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { Domain domain = searchDomain(cmd.getNetworkDomain(), false); if (domain == null) { domain = _globoDns.getDomainAPI().createDomain(cmd.getNetworkDomain(), cmd.getReverseTemplateId(), DEFAULT_AUTHORITY_TYPE); - s_logger.warn("Domain " + cmd.getNetworkDomain() + " doesn't exist, maybe someone removed it. It was automatically created with template " + logger.warn("Domain " + cmd.getNetworkDomain() + " doesn't exist, maybe someone removed it. 
It was automatically created with template " + cmd.getReverseTemplateId()); } @@ -287,7 +285,7 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { Domain reverseDomain = searchDomain(reverseDomainName, true); if (reverseDomain == null) { reverseDomain = _globoDns.getDomainAPI().createReverseDomain(reverseDomainName, templateId, DEFAULT_AUTHORITY_TYPE); - s_logger.info("Created reverse domain " + reverseDomainName + " with template " + templateId); + logger.info("Created reverse domain " + reverseDomainName + " with template " + templateId); } // create reverse @@ -303,14 +301,14 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { if (domain == null) { // create domain = _globoDns.getDomainAPI().createDomain(cmd.getDomainName(), cmd.getTemplateId(), DEFAULT_AUTHORITY_TYPE); - s_logger.info("Created domain " + cmd.getDomainName() + " with template " + cmd.getTemplateId()); + logger.info("Created domain " + cmd.getDomainName() + " with template " + cmd.getTemplateId()); if (domain == null) { return new Answer(cmd, false, "Unable to create domain " + cmd.getDomainName()); } else { needsExport = true; } } else { - s_logger.warn("Domain " + cmd.getDomainName() + " already exist."); + logger.warn("Domain " + cmd.getDomainName() + " already exist."); } return new Answer(cmd); } catch (GloboDnsException e) { @@ -331,16 +329,16 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { protected boolean removeRecord(String recordName, String recordValue, String bindZoneName, boolean reverse, boolean override) { Domain domain = searchDomain(bindZoneName, reverse); if (domain == null) { - s_logger.warn("Domain " + bindZoneName + " doesn't exists in GloboDNS. Record " + recordName + " has already been removed."); + logger.warn("Domain " + bindZoneName + " doesn't exists in GloboDNS. 
Record " + recordName + " has already been removed."); return false; } Record record = searchRecord(recordName, domain.getId()); if (record == null) { - s_logger.warn("Record " + recordName + " in domain " + bindZoneName + " has already been removed."); + logger.warn("Record " + recordName + " in domain " + bindZoneName + " has already been removed."); return false; } else { if (!override && !record.getContent().equals(recordValue)) { - s_logger.warn("Record " + recordName + " in domain " + bindZoneName + " have different value from " + recordValue + logger.warn("Record " + recordName + " in domain " + bindZoneName + " have different value from " + recordValue + " and override is not enable. I will not delete it."); return false; } @@ -363,7 +361,7 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { if (record == null) { // Create new record record = _globoDns.getRecordAPI().createRecord(domainId, name, ip, type); - s_logger.info("Created record " + record.getName() + " in domain " + domainId); + logger.info("Created record " + record.getName() + " in domain " + domainId); } else { if (!ip.equals(record.getContent())) { if (Boolean.TRUE.equals(override)) { @@ -384,10 +382,10 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { try { Export export = _globoDns.getExportAPI().scheduleExport(); if (export != null) { - s_logger.info("GloboDns Export: " + export.getResult()); + logger.info("GloboDns Export: " + export.getResult()); } } catch (GloboDnsException e) { - s_logger.warn("Error on scheduling export. Although everything was persist, someone need to manually force export in GloboDns", e); + logger.warn("Error on scheduling export. Although everything was persist, someone need to manually force export in GloboDns", e); } } @@ -428,11 +426,11 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { // GloboDns search name in name and content. 
We need to iterate to check if recordName exists only in name for (Record candidate : candidates) { if (recordName.equalsIgnoreCase(candidate.getName())) { - s_logger.debug("Record " + recordName + " in domain id " + domainId + " found in GloboDNS"); + logger.debug("Record " + recordName + " in domain id " + domainId + " found in GloboDNS"); return candidate; } } - s_logger.debug("Record " + recordName + " in domain id " + domainId + " not found in GloboDNS"); + logger.debug("Record " + recordName + " in domain id " + domainId + " not found in GloboDNS"); return null; } diff --git a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java index fcdbfb75cee..eb2b4b7b959 100644 --- a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java +++ b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java @@ -27,8 +27,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; - import org.apache.cloudstack.api.command.admin.internallb.ConfigureInternalLoadBalancerElementCmd; import org.apache.cloudstack.api.command.admin.internallb.CreateInternalLoadBalancerElementCmd; import org.apache.cloudstack.api.command.admin.internallb.ListInternalLoadBalancerElementsCmd; @@ -57,6 +55,7 @@ import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.dao.NetworkServiceMapDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.VirtualRouterProviderDao; +import com.cloud.network.element.HAProxyLBRule; import com.cloud.network.element.IpDeployer; import com.cloud.network.element.LoadBalancingServiceProvider; import com.cloud.network.element.NetworkElement; @@ 
-86,7 +85,6 @@ import com.cloud.vm.dao.DomainRouterDao; @Local(value = {NetworkElement.class}) public class InternalLoadBalancerElement extends AdapterBase implements LoadBalancingServiceProvider, InternalLoadBalancerElementService, IpDeployer { - private static final Logger s_logger = Logger.getLogger(InternalLoadBalancerElement.class); protected static final Map> capabilities = setCapabilities(); private static InternalLoadBalancerElement internalLbElement = null; @@ -110,6 +108,8 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala ApplicationLoadBalancerRuleDao _appLbDao; @Inject EntityManager _entityMgr; + @Inject + private HAProxyLBRule haProxyLBRule; protected InternalLoadBalancerElement() { } @@ -125,11 +125,11 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala //works in Advance zone only DataCenter dc = _entityMgr.findById(DataCenter.class, config.getDataCenterId()); if (dc.getNetworkType() != NetworkType.Advanced) { - s_logger.trace("Not hanling zone of network type " + dc.getNetworkType()); + logger.trace("Not hanling zone of network type " + dc.getNetworkType()); return false; } if (config.getGuestType() != Network.GuestType.Isolated || config.getTrafficType() != TrafficType.Guest) { - s_logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType()); + logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType()); return false; } @@ -138,14 +138,14 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala String schemeCaps = lbCaps.get(Capability.LbSchemes); if (schemeCaps != null && lbScheme != null) { if (!schemeCaps.contains(lbScheme.toString())) { - s_logger.debug("Scheme " + lbScheme.toString() + " is not supported by the provider " + getName()); + logger.debug("Scheme " + lbScheme.toString() + " is not supported by the provider " + getName()); 
return false; } } } if (!_ntwkModel.isProviderSupportServiceInNetwork(config.getId(), Service.Lb, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + Service.Lb + " in the network " + config); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + Service.Lb + " in the network " + config); return false; } return true; @@ -163,10 +163,10 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, - ResourceUnavailableException, InsufficientCapacityException { + ResourceUnavailableException, InsufficientCapacityException { if (!canHandle(network, null)) { - s_logger.trace("No need to implement " + getName()); + logger.trace("No need to implement " + getName()); return true; } @@ -175,10 +175,10 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) - throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { + throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { if (!canHandle(network, null)) { - s_logger.trace("No need to prepare " + getName()); + logger.trace("No need to prepare " + getName()); return true; } @@ -197,16 +197,16 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala Ip sourceIp = new Ip(ip); long active = _appLbDao.countActiveBySourceIp(sourceIp, network.getId()); if (active > 0) { - s_logger.debug("Have to implement internal lb vm for source ip " + sourceIp + " as a part of network " + network + " implement as there are " + active + - " internal lb rules exist for this ip"); + logger.debug("Have to 
implement internal lb vm for source ip " + sourceIp + " as a part of network " + network + " implement as there are " + active + + " internal lb rules exist for this ip"); List internalLbVms; try { internalLbVms = _internalLbMgr.deployInternalLbVm(network, sourceIp, dest, _accountMgr.getAccount(network.getAccountId()), null); } catch (InsufficientCapacityException e) { - s_logger.warn("Failed to deploy element " + getName() + " for ip " + sourceIp + " due to:", e); + logger.warn("Failed to deploy element " + getName() + " for ip " + sourceIp + " due to:", e); return false; } catch (ConcurrentOperationException e) { - s_logger.warn("Failed to deploy element " + getName() + " for ip " + sourceIp + " due to:", e); + logger.warn("Failed to deploy element " + getName() + " for ip " + sourceIp + " due to:", e); return false; } @@ -221,7 +221,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) throws ConcurrentOperationException, - ResourceUnavailableException { + ResourceUnavailableException { return true; } @@ -236,11 +236,11 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala result = result && _internalLbMgr.destroyInternalLbVm(internalLbVm.getId(), context.getAccount(), context.getCaller().getId()); if (cleanup) { if (!result) { - s_logger.warn("Failed to stop internal lb element " + internalLbVm + ", but would try to process clean up anyway."); + logger.warn("Failed to stop internal lb element " + internalLbVm + ", but would try to process clean up anyway."); } result = (_internalLbMgr.destroyInternalLbVm(internalLbVm.getId(), context.getAccount(), context.getCaller().getId())); if (!result) { - s_logger.warn("Failed to clean up internal lb element " + internalLbVm); + logger.warn("Failed to clean up internal lb element " + internalLbVm); } } } @@ -271,7 +271,7 @@ public class 
InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, - ResourceUnavailableException { + ResourceUnavailableException { VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(provider.getId(), Type.InternalLbVm); if (element == null) { return true; @@ -309,7 +309,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala //2) Get rules to apply Map> rulesToApply = getLbRulesToApply(rules); - s_logger.debug("Applying " + rulesToApply.size() + " on element " + getName()); + logger.debug("Applying " + rulesToApply.size() + " on element " + getName()); for (Ip sourceIp : rulesToApply.keySet()) { if (vmsToDestroy.contains(sourceIp)) { @@ -318,11 +318,11 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala if (vms.size() > 0) { //only one internal lb per IP exists try { - s_logger.debug("Destroying internal lb vm for ip " + sourceIp.addr() + " as all the rules for this vm are in Revoke state"); + logger.debug("Destroying internal lb vm for ip " + sourceIp.addr() + " as all the rules for this vm are in Revoke state"); return _internalLbMgr.destroyInternalLbVm(vms.get(0).getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), - _accountMgr.getUserIncludingRemoved(User.UID_SYSTEM).getId()); + _accountMgr.getUserIncludingRemoved(User.UID_SYSTEM).getId()); } catch (ConcurrentOperationException e) { - s_logger.warn("Failed to apply lb rule(s) for ip " + sourceIp.addr() + " on the element " + getName() + " due to:", e); + logger.warn("Failed to apply lb rule(s) for ip " + sourceIp.addr() + " on the element " + getName() + " due to:", e); return false; } } @@ -333,10 +333,10 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala DeployDestination dest = new 
DeployDestination(_entityMgr.findById(DataCenter.class, network.getDataCenterId()), null, null, null); internalLbVms = _internalLbMgr.deployInternalLbVm(network, sourceIp, dest, _accountMgr.getAccount(network.getAccountId()), null); } catch (InsufficientCapacityException e) { - s_logger.warn("Failed to apply lb rule(s) for ip " + sourceIp.addr() + "on the element " + getName() + " due to:", e); + logger.warn("Failed to apply lb rule(s) for ip " + sourceIp.addr() + "on the element " + getName() + " due to:", e); return false; } catch (ConcurrentOperationException e) { - s_logger.warn("Failed to apply lb rule(s) for ip " + sourceIp.addr() + "on the element " + getName() + " due to:", e); + logger.warn("Failed to apply lb rule(s) for ip " + sourceIp.addr() + "on the element " + getName() + " due to:", e); return false; } @@ -347,7 +347,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala //2.3 Apply Internal LB rules on the VM if (!_internalLbMgr.applyLoadBalancingRules(network, rulesToApply.get(sourceIp), internalLbVms)) { throw new CloudRuntimeException("Failed to apply load balancing rules for ip " + sourceIp.addr() + " in network " + network.getId() + " on element " + - getName()); + getName()); } } } @@ -372,7 +372,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala //2) Check if there are non revoked rules for the source ip address List rulesToCheck = groupedRules.get(sourceIp); if (_appLbDao.countBySourceIpAndNotRevoked(sourceIp, rulesToCheck.get(0).getNetworkId()) == 0) { - s_logger.debug("Have to destroy internal lb vm for source ip " + sourceIp + " as it has 0 rules in non-Revoke state"); + logger.debug("Have to destroy internal lb vm for source ip " + sourceIp + " as it has 0 rules in non-Revoke state"); vmsToDestroy.add(sourceIp); } } @@ -395,7 +395,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala rulesToApply.add(rule); groupedRules.put(sourceIp, 
rulesToApply); } else { - s_logger.debug("Internal lb rule " + rule + " doesn't have any vms assigned, skipping"); + logger.debug("Internal lb rule " + rule + " doesn't have any vms assigned, skipping"); } } return groupedRules; @@ -410,7 +410,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala if (routers == null || routers.isEmpty()) { return true; } - return VirtualRouterElement.validateHAProxyLBRule(rule); + return haProxyLBRule.validateHAProxyLBRule(rule); } return true; } @@ -449,7 +449,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala VirtualRouterProviderVO element = _vrProviderDao.findById(id); if (element == null || element.getType() != Type.InternalLbVm) { throw new InvalidParameterValueException("Can't find " + getName() + " element with network service provider id " + id + " to be used as a provider for " + - getName()); + getName()); } element.setEnabled(enable); @@ -462,7 +462,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala public VirtualRouterProvider addInternalLoadBalancerElement(long ntwkSvcProviderId) { VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(ntwkSvcProviderId, Type.InternalLbVm); if (element != null) { - s_logger.debug("There is already an " + getName() + " with service provider id " + ntwkSvcProviderId); + logger.debug("There is already an " + getName() + " with service provider id " + ntwkSvcProviderId); return null; } diff --git a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java index 88ab512dbe9..59f07f664cb 100644 --- a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java +++ 
b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationSe import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.lb.ApplicationLoadBalancerRuleVO; import org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDao; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -121,7 +120,6 @@ import com.cloud.vm.dao.NicDao; @Local(value = {InternalLoadBalancerVMManager.class, InternalLoadBalancerVMService.class}) public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements InternalLoadBalancerVMManager, InternalLoadBalancerVMService, VirtualMachineGuru { - private static final Logger s_logger = Logger.getLogger(InternalLoadBalancerVMManagerImpl.class); static final private String InternalLbVmNamePrefix = "b"; private String _instance; @@ -206,13 +204,13 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In controlNic = nic; // Internal LB control command is sent over management server in VMware if (dest.getHost().getHypervisorType() == HypervisorType.VMware) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Check if we need to add management server explicit route to Internal LB. pod cidr: " + dest.getPod().getCidrAddress() + "/" + + if (logger.isInfoEnabled()) { + logger.info("Check if we need to add management server explicit route to Internal LB. 
pod cidr: " + dest.getPod().getCidrAddress() + "/" + dest.getPod().getCidrSize() + ", pod gateway: " + dest.getPod().getGateway() + ", management host: " + _mgmtHost); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Add management server explicit route to Internal LB."); + if (logger.isInfoEnabled()) { + logger.info("Add management server explicit route to Internal LB."); } buf.append(" mgmtcidr=").append(_mgmtCidr); @@ -235,8 +233,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In final String type = "ilbvm"; buf.append(" type=" + type); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Boot Args for " + profile + ": " + buf.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Boot Args for " + profile + ": " + buf.toString()); } return true; @@ -271,7 +269,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In if (answer != null && answer instanceof CheckSshAnswer) { final CheckSshAnswer sshAnswer = (CheckSshAnswer)answer; if (sshAnswer == null || !sshAnswer.getResult()) { - s_logger.warn("Unable to ssh to the internal LB VM: " + sshAnswer.getDetails()); + logger.warn("Unable to ssh to the internal LB VM: " + sshAnswer.getDetails()); result = false; } } else { @@ -295,7 +293,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In if (answer != null && answer instanceof GetDomRVersionAnswer) { final GetDomRVersionAnswer versionAnswer = (GetDomRVersionAnswer)answer; if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to get the template/scripts version of internal LB VM " + internalLbVm.getInstanceName() + " due to: " + versionAnswer.getDetails()); + logger.warn("Unable to get the template/scripts version of internal LB VM " + internalLbVm.getInstanceName() + " due to: " + versionAnswer.getDetails()); result = false; } else { internalLbVm.setTemplateVersion(versionAnswer.getTemplateVersion()); @@ -315,7 +313,7 @@ public class 
InternalLoadBalancerVMManagerImpl extends ManagerBase implements In final NicProfile controlNic = getNicProfileByTrafficType(profile, TrafficType.Control); if (controlNic == null) { - s_logger.error("Control network doesn't exist for the internal LB vm " + internalLbVm); + logger.error("Control network doesn't exist for the internal LB vm " + internalLbVm); return false; } @@ -375,7 +373,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In if (off != null) { _internalLbVmOfferingId = off.getId(); } else { - s_logger.warn("Invalid offering UUID is passed in " + Config.InternalLbVmServiceOfferingId.key() + "; the default offering will be used instead"); + logger.warn("Invalid offering UUID is passed in " + Config.InternalLbVmServiceOfferingId.key() + "; the default offering will be used instead"); } } @@ -387,15 +385,15 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In Storage.ProvisioningType.THIN, true, null, true, VirtualMachine.Type.InternalLoadBalancerVm, true); if (offerings == null || offerings.size() < 2) { String msg = "Data integrity problem : System Offering For Internal LB VM has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } _itMgr.registerGuru(VirtualMachine.Type.InternalLoadBalancerVm, this); - if (s_logger.isInfoEnabled()) { - s_logger.info(getName() + " has been configured"); + if (logger.isInfoEnabled()) { + logger.info(getName() + " has been configured"); } return true; @@ -426,7 +424,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In } protected void finalizeLbRulesForIp(final Commands cmds, final DomainRouterVO internalLbVm, final Provider provider, final Ip sourceIp, final long guestNtwkId) { - s_logger.debug("Resending load balancing rules as a part of start for " + internalLbVm); + logger.debug("Resending load balancing rules as a part of start for " + internalLbVm); final List lbs = 
_lbDao.listBySrcIpSrcNtwkId(sourceIp, guestNtwkId); final List lbRules = new ArrayList(); if (_ntwkModel.isProviderSupportServiceInNetwork(guestNtwkId, Service.Lb, provider)) { @@ -440,7 +438,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In } } - s_logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of Intenrnal LB vm" + internalLbVm + " start."); + logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of Intenrnal LB vm" + internalLbVm + " start."); if (!lbRules.isEmpty()) { createApplyLoadBalancingRulesCommands(lbRules, internalLbVm, cmds, guestNtwkId); } @@ -507,7 +505,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In } if (controlIpAddress == null) { - s_logger.warn("Unable to find Internal LB control ip in its attached NICs!. Internal LB vm: " + internalLbVmId); + logger.warn("Unable to find Internal LB control ip in its attached NICs!. Internal LB vm: " + internalLbVmId); final DomainRouterVO internalLbVm = _internalLbVmDao.findById(internalLbVmId); return internalLbVm.getPrivateIpAddress(); } @@ -517,8 +515,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In @Override public boolean destroyInternalLbVm(final long vmId, final Account caller, final Long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Attempting to destroy Internal LB vm " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Attempting to destroy Internal LB vm " + vmId); } final DomainRouterVO internalLbVm = _internalLbVmDao.findById(vmId); @@ -548,7 +546,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In protected VirtualRouter stopInternalLbVm(final DomainRouterVO internalLbVm, final boolean forced, final Account caller, final long callerUserId) throws ResourceUnavailableException, 
ConcurrentOperationException { - s_logger.debug("Stopping internal lb vm " + internalLbVm); + logger.debug("Stopping internal lb vm " + internalLbVm); try { _itMgr.advanceStop(internalLbVm.getUuid(), forced); return _internalLbVmDao.findById(internalLbVm.getId()); @@ -573,7 +571,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In if (internalLbVms != null) { runningInternalLbVms = new ArrayList(); } else { - s_logger.debug("Have no internal lb vms to start"); + logger.debug("Have no internal lb vms to start"); return null; } @@ -599,8 +597,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In throw new ConcurrentOperationException("Unable to lock network " + guestNetwork.getId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is acquired for network id " + lock.getId() + " as a part of internal lb startup in " + dest); + if (logger.isDebugEnabled()) { + logger.debug("Lock is acquired for network id " + lock.getId() + " as a part of internal lb startup in " + dest); } final long internalLbProviderId = getInternalLbProviderId(guestNetwork); @@ -616,7 +614,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In final DeploymentPlan plan = planAndInternalLbVms.first(); if (internalLbVms.size() > 0) { - s_logger.debug("Found " + internalLbVms.size() + " internal lb vms for the requested IP " + requestedGuestIp.addr()); + logger.debug("Found " + internalLbVms.size() + " internal lb vms for the requested IP " + requestedGuestIp.addr()); return internalLbVms; } @@ -636,8 +634,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In } finally { if (lock != null) { _networkDao.releaseFromLockTable(lock.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is released for network id " + lock.getId() + " as a part of internal lb vm startup in " + dest); + if (logger.isDebugEnabled()) { + logger.debug("Lock is released for 
network id " + lock.getId() + " as a part of internal lb vm startup in " + dest); } } } @@ -669,7 +667,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In //1) Guest network - default if (guestNetwork != null) { - s_logger.debug("Adding nic for Internal LB in Guest network " + guestNetwork); + logger.debug("Adding nic for Internal LB in Guest network " + guestNetwork); final NicProfile guestNic = new NicProfile(); if (guestIp != null) { guestNic.setIPv4Address(guestIp.addr()); @@ -688,7 +686,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In } //2) Control network - s_logger.debug("Adding nic for Internal LB vm in Control network "); + logger.debug("Adding nic for Internal LB vm in Control network "); final List offerings = _ntwkModel.getSystemAccountNetworkOfferings(NetworkOffering.SystemControlNetwork); final NetworkOffering controlOffering = offerings.get(0); final Network controlConfig = _ntwkMgr.setupNetwork(_accountMgr.getSystemAccount(), controlOffering, plan, null, null, false).get(0); @@ -740,8 +738,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In final HypervisorType hType = iter.next(); try { final long id = _internalLbVmDao.getNextInSequence(Long.class, "id"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating the internal lb vm " + id + " in datacenter " + dest.getDataCenter() + " with hypervisor type " + hType); + if (logger.isDebugEnabled()) { + logger.debug("Creating the internal lb vm " + id + " in datacenter " + dest.getDataCenter() + " with hypervisor type " + hType); } String templateName = null; switch (hType) { @@ -766,7 +764,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In final VMTemplateVO template = _templateDao.findRoutingTemplate(hType, templateName); if (template == null) { - s_logger.debug(hType + " won't support system vm, skip it"); + logger.debug(hType + " won't support system vm, 
skip it"); continue; } @@ -784,7 +782,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In internalLbVm = _internalLbVmDao.findById(internalLbVm.getId()); } catch (final InsufficientCapacityException ex) { if (allocateRetry < 2 && iter.hasNext()) { - s_logger.debug("Failed to allocate the Internal lb vm with hypervisor type " + hType + ", retrying one more time"); + logger.debug("Failed to allocate the Internal lb vm with hypervisor type " + hType + ", retrying one more time"); continue; } else { throw ex; @@ -799,7 +797,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In break; } catch (final InsufficientCapacityException ex) { if (startRetry < 2 && iter.hasNext()) { - s_logger.debug("Failed to start the Internal lb vm " + internalLbVm + " with hypervisor type " + hType + ", " + + logger.debug("Failed to start the Internal lb vm " + internalLbVm + " with hypervisor type " + hType + ", " + "destroying it and recreating one more time"); // destroy the internal lb vm destroyInternalLbVm(internalLbVm.getId(), _accountMgr.getSystemAccount(), User.UID_SYSTEM); @@ -820,10 +818,10 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In protected DomainRouterVO startInternalLbVm(DomainRouterVO internalLbVm, final Account caller, final long callerUserId, final Map params) throws StorageUnavailableException, InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Starting Internal LB VM " + internalLbVm); + logger.debug("Starting Internal LB VM " + internalLbVm); _itMgr.start(internalLbVm.getUuid(), params, null, null); if (internalLbVm.isStopPending()) { - s_logger.info("Clear the stop pending flag of Internal LB VM " + internalLbVm.getHostName() + " after start router successfully!"); + logger.info("Clear the stop pending flag of Internal LB VM " + internalLbVm.getHostName() + " after start router successfully!"); 
internalLbVm.setStopPending(false); internalLbVm = _internalLbVmDao.persist(internalLbVm); } @@ -862,10 +860,10 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In public boolean applyLoadBalancingRules(final Network network, final List rules, final List internalLbVms) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - s_logger.debug("No lb rules to be applied for network " + network); + logger.debug("No lb rules to be applied for network " + network); return true; } - s_logger.info("lb rules to be applied for network "); + logger.info("lb rules to be applied for network "); //only one internal lb vm is supported per ip address at this time if (internalLbVms == null || internalLbVms.isEmpty()) { throw new CloudRuntimeException("Can't apply the lb rules on network " + network + " as the list of internal lb vms is empty"); @@ -875,10 +873,10 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In if (lbVm.getState() == State.Running) { return sendLBRules(lbVm, rules, network.getId()); } else if (lbVm.getState() == State.Stopped || lbVm.getState() == State.Stopping) { - s_logger.debug("Internal LB VM " + lbVm.getInstanceName() + " is in " + lbVm.getState() + ", so not sending apply lb rules commands to the backend"); + logger.debug("Internal LB VM " + lbVm.getInstanceName() + " is in " + lbVm.getState() + ", so not sending apply lb rules commands to the backend"); return true; } else { - s_logger.warn("Unable to apply lb rules, Internal LB VM is not in the right state " + lbVm.getState()); + logger.warn("Unable to apply lb rules, Internal LB VM is not in the right state " + lbVm.getState()); throw new ResourceUnavailableException("Unable to apply lb rules; Internal LB VM is not in the right state", DataCenter.class, lbVm.getDataCenterId()); } } @@ -894,7 +892,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In try { answers = 
_agentMgr.send(internalLbVm.getHostId(), cmds); } catch (final OperationTimedoutException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); throw new AgentUnavailableException("Unable to send commands to virtual router ", internalLbVm.getHostId(), e); } diff --git a/plugins/network-elements/internal-loadbalancer/test/resources/lb_element.xml b/plugins/network-elements/internal-loadbalancer/test/resources/lb_element.xml index 5dec9c314f6..6be23c8979f 100644 --- a/plugins/network-elements/internal-loadbalancer/test/resources/lb_element.xml +++ b/plugins/network-elements/internal-loadbalancer/test/resources/lb_element.xml @@ -42,5 +42,5 @@ - + diff --git a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java index 7f564098755..85755e5951b 100644 --- a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java +++ b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.network.contrail.model.InstanceIpModel; import org.apache.cloudstack.network.contrail.model.VMInterfaceModel; import org.apache.cloudstack.network.contrail.model.VirtualMachineModel; import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DeployDestination; @@ -85,7 +84,6 @@ public class ContrailElementImpl extends AdapterBase NicDao _nicDao; @Inject ServerDBSync _dbSync; - private static final Logger s_logger = Logger.getLogger(ContrailElement.class); // PluggableService @Override @@ -122,9 +120,9 @@ public class ContrailElementImpl extends AdapterBase @Override public boolean implement(Network network, 
NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("NetworkElement implement: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug("NetworkElement implement: " + network.getName() + ", traffic type: " + network.getTrafficType()); if (network.getTrafficType() == TrafficType.Guest) { - s_logger.debug("ignore network " + network.getName()); + logger.debug("ignore network " + network.getName()); return true; } VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); @@ -139,7 +137,7 @@ public class ContrailElementImpl extends AdapterBase } _manager.getDatabase().getVirtualNetworks().add(vnModel); } catch (Exception ex) { - s_logger.warn("virtual-network update: ", ex); + logger.warn("virtual-network update: ", ex); } return true; } @@ -148,14 +146,14 @@ public class ContrailElementImpl extends AdapterBase public boolean prepare(Network network, NicProfile nicProfile, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("NetworkElement prepare: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug("NetworkElement prepare: " + network.getName() + ", traffic type: " + network.getTrafficType()); if (network.getTrafficType() == TrafficType.Guest) { - s_logger.debug("ignore network " + network.getName()); + logger.debug("ignore network " + network.getName()); return true; } - s_logger.debug("network: " + network.getId()); + logger.debug("network: " + network.getId()); VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); @@ -185,7 
+183,7 @@ public class ContrailElementImpl extends AdapterBase try { vmiModel.build(_manager.getModelController(), (VMInstanceVO)vm.getVirtualMachine(), nic); } catch (IOException ex) { - s_logger.warn("vm interface set", ex); + logger.warn("vm interface set", ex); return false; } @@ -199,7 +197,7 @@ public class ContrailElementImpl extends AdapterBase try { vmModel.update(_manager.getModelController()); } catch (Exception ex) { - s_logger.warn("virtual-machine-update", ex); + logger.warn("virtual-machine-update", ex); return false; } _manager.getDatabase().getVirtualMachines().add(vmModel); @@ -213,7 +211,7 @@ public class ContrailElementImpl extends AdapterBase if (network.getTrafficType() == TrafficType.Guest) { return true; } else if (!_manager.isManagedPhysicalNetwork(network)) { - s_logger.debug("release ignore network " + network.getId()); + logger.debug("release ignore network " + network.getId()); return true; } @@ -222,7 +220,7 @@ public class ContrailElementImpl extends AdapterBase VirtualMachineModel vmModel = _manager.getDatabase().lookupVirtualMachine(vm.getUuid()); if (vmModel == null) { - s_logger.debug("vm " + vm.getInstanceName() + " not in local database"); + logger.debug("vm " + vm.getInstanceName() + " not in local database"); return true; } VMInterfaceModel vmiModel = vmModel.getVMInterface(nic.getUuid()); @@ -230,7 +228,7 @@ public class ContrailElementImpl extends AdapterBase try { vmiModel.destroy(_manager.getModelController()); } catch (IOException ex) { - s_logger.warn("virtual-machine-interface delete", ex); + logger.warn("virtual-machine-interface delete", ex); } vmModel.removeSuccessor(vmiModel); } @@ -252,7 +250,7 @@ public class ContrailElementImpl extends AdapterBase */ @Override public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("NetworkElement shutdown"); + logger.debug("NetworkElement shutdown"); return true; } 
@@ -261,7 +259,7 @@ public class ContrailElementImpl extends AdapterBase */ @Override public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("NetworkElement destroy"); + logger.debug("NetworkElement destroy"); return true; } @@ -275,11 +273,11 @@ public class ContrailElementImpl extends AdapterBase List systemNets = _manager.findSystemNetworks(types); if (systemNets != null && !systemNets.isEmpty()) { for (NetworkVO net: systemNets) { - s_logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); + logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); _networksDao.update(net.getId(), net, serviceMap); } } else { - s_logger.debug("no system networks created yet"); + logger.debug("no system networks created yet"); } serviceMap = ((ConfigurationServerImpl)_configServer).getServicesAndProvidersForNetwork( _manager.getPublicRouterOffering().getId()); types = new ArrayList(); @@ -287,11 +285,11 @@ public class ContrailElementImpl extends AdapterBase systemNets = _manager.findSystemNetworks(types); if (systemNets != null && !systemNets.isEmpty()) { for (NetworkVO net: systemNets) { - s_logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); + logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); _networksDao.update(net.getId(), net, serviceMap); } } else { - s_logger.debug("no system networks created yet"); + logger.debug("no system networks created yet"); } return true; } @@ -299,7 +297,7 @@ public class ContrailElementImpl extends AdapterBase @Override public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("NetworkElement shutdown ProviderInstances"); 
+ logger.debug("NetworkElement shutdown ProviderInstances"); return true; } @@ -311,8 +309,8 @@ public class ContrailElementImpl extends AdapterBase @Override public boolean verifyServicesCombination(Set services) { // TODO Auto-generated method stub - s_logger.debug("NetworkElement verifyServices"); - s_logger.debug("Services: " + services); + logger.debug("NetworkElement verifyServices"); + logger.debug("Services: " + services); return true; } @@ -330,11 +328,11 @@ public class ContrailElementImpl extends AdapterBase } if (isFloatingIpCreate(ip)) { if (_manager.createFloatingIp(ip)) { - s_logger.debug("Successfully created floating ip: " + ip.getAddress().addr()); + logger.debug("Successfully created floating ip: " + ip.getAddress().addr()); } } else { if (_manager.deleteFloatingIp(ip)) { - s_logger.debug("Successfully deleted floating ip: " + ip.getAddress().addr()); + logger.debug("Successfully deleted floating ip: " + ip.getAddress().addr()); } } } diff --git a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailGuru.java b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailGuru.java index 8a86bc85363..70e7eeb31a8 100644 --- a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailGuru.java +++ b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailGuru.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.network.contrail.model.InstanceIpModel; import org.apache.cloudstack.network.contrail.model.VMInterfaceModel; import org.apache.cloudstack.network.contrail.model.VirtualMachineModel; import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -91,7 +90,6 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Inject 
DataCenterDao _dcDao; - private static final Logger s_logger = Logger.getLogger(ContrailGuru.class); private static final TrafficType[] TrafficTypes = {TrafficType.Guest}; private boolean canHandle(NetworkOffering offering, NetworkType networkType, PhysicalNetwork physicalNetwork) { @@ -126,7 +124,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(),physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } NetworkVO network = @@ -136,14 +134,14 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { network.setCidr(userSpecified.getCidr()); network.setGateway(userSpecified.getGateway()); } - s_logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); + logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? 
"" : " subnet: " + network.getCidr())); return network; } @Override public Network implement(Network network, NetworkOffering offering, DeployDestination destination, ReservationContext context) throws InsufficientVirtualNetworkCapacityException { - s_logger.debug("Implement network: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug("Implement network: " + network.getName() + ", traffic type: " + network.getTrafficType()); VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { @@ -156,7 +154,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { vnModel.update(_manager.getModelController()); } } catch (Exception ex) { - s_logger.warn("virtual-network update: ", ex); + logger.warn("virtual-network update: ", ex); return network; } _manager.getDatabase().getVirtualNetworks().add(vnModel); @@ -164,7 +162,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { if (network.getVpcId() != null) { List ips = _ipAddressDao.listByAssociatedVpc(network.getVpcId(), true); if (ips.isEmpty()) { - s_logger.debug("Creating a source nat ip for network " + network); + logger.debug("Creating a source nat ip for network " + network); Account owner = _accountMgr.getAccount(network.getAccountId()); try { PublicIp publicIp = _ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network); @@ -174,7 +172,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { _ipAddressDao.update(ip.getId(), ip); _ipAddressDao.releaseFromLockTable(ip.getId()); } catch (Exception e) { - s_logger.error("Unable to allocate source nat ip: " + e); + logger.error("Unable to allocate source nat ip: " + e); } } } @@ -190,7 +188,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public NicProfile allocate(Network network, NicProfile profile, 
VirtualMachineProfile vm) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - s_logger.debug("allocate NicProfile on " + network.getName()); + logger.debug("allocate NicProfile on " + network.getName()); if (profile != null && profile.getRequestedIPv4() != null) { throw new CloudRuntimeException("Does not support custom ip allocation at this time: " + profile); @@ -204,7 +202,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { try { broadcastUri = new URI("vlan://untagged"); } catch (Exception e) { - s_logger.warn("unable to instantiate broadcast URI: " + e); + logger.warn("unable to instantiate broadcast URI: " + e); } profile.setBroadcastUri(broadcastUri); @@ -217,8 +215,8 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - s_logger.debug("reserve NicProfile on network id: " + network.getId() + " " + network.getName()); - s_logger.debug("deviceId: " + nic.getDeviceId()); + logger.debug("reserve NicProfile on network id: " + network.getId() + " " + network.getName()); + logger.debug("deviceId: " + nic.getDeviceId()); NicVO nicVO = _nicDao.findById(nic.getId()); assert nicVO != null; @@ -244,7 +242,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { vmiModel.build(_manager.getModelController(), (VMInstanceVO)vm.getVirtualMachine(), nicVO); vmiModel.setActive(); } catch (IOException ex) { - s_logger.error("virtual-machine-interface set", ex); + logger.error("virtual-machine-interface set", ex); return; } @@ -253,17 +251,17 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { ipModel = new InstanceIpModel(vm.getInstanceName(), nic.getDeviceId()); 
ipModel.addToVMInterface(vmiModel); } else { - s_logger.debug("Reuse existing instance-ip object on " + ipModel.getName()); + logger.debug("Reuse existing instance-ip object on " + ipModel.getName()); } if (nic.getIPv4Address() != null) { - s_logger.debug("Nic using existing IP address " + nic.getIPv4Address()); + logger.debug("Nic using existing IP address " + nic.getIPv4Address()); ipModel.setAddress(nic.getIPv4Address()); } try { vmModel.update(_manager.getModelController()); } catch (Exception ex) { - s_logger.warn("virtual-machine update", ex); + logger.warn("virtual-machine update", ex); return; } @@ -274,15 +272,15 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { if (nic.getMacAddress() == null) { MacAddressesType macs = vmi.getMacAddresses(); if (macs == null) { - s_logger.debug("no mac address is allocated for Nic " + nicVO.getUuid()); + logger.debug("no mac address is allocated for Nic " + nicVO.getUuid()); } else { - s_logger.info("VMI " + _manager.getVifNameByVmUuid(vm.getUuid(), nicVO.getDeviceId()) + " got mac address: " + macs.getMacAddress().get(0)); + logger.info("VMI " + _manager.getVifNameByVmUuid(vm.getUuid(), nicVO.getDeviceId()) + " got mac address: " + macs.getMacAddress().get(0)); nic.setMacAddress(macs.getMacAddress().get(0)); } } if (nic.getIPv4Address() == null) { - s_logger.debug("Allocated IP address " + ipModel.getAddress()); + logger.debug("Allocated IP address " + ipModel.getAddress()); nic.setIPv4Address(ipModel.getAddress()); if (network.getCidr() != null) { nic.setIPv4Netmask(NetUtils.cidr2Netmask(network.getCidr())); @@ -298,7 +296,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservationId) { - s_logger.debug("release NicProfile " + nic.getId()); + logger.debug("release NicProfile " + nic.getId()); return true; } @@ -308,7 +306,7 @@ public class ContrailGuru extends AdapterBase implements 
NetworkGuru { */ @Override public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - s_logger.debug("deallocate NicProfile " + nic.getId() + " on " + network.getName()); + logger.debug("deallocate NicProfile " + nic.getId() + " on " + network.getName()); NicVO nicVO = _nicDao.findById(nic.getId()); assert nicVO != null; @@ -332,7 +330,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { try { vmModel.delete(_manager.getModelController()); } catch (IOException ex) { - s_logger.warn("virtual-machine delete", ex); + logger.warn("virtual-machine delete", ex); return; } } @@ -342,12 +340,12 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public void updateNicProfile(NicProfile profile, Network network) { // TODO Auto-generated method stub - s_logger.debug("update NicProfile " + profile.getId() + " on " + network.getName()); + logger.debug("update NicProfile " + profile.getId() + " on " + network.getName()); } @Override public void shutdown(NetworkProfile network, NetworkOffering offering) { - s_logger.debug("NetworkGuru shutdown"); + logger.debug("NetworkGuru shutdown"); VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { return; @@ -356,21 +354,21 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { _manager.getDatabase().getVirtualNetworks().remove(vnModel); vnModel.delete(_manager.getModelController()); } catch (IOException e) { - s_logger.warn("virtual-network delete", e); + logger.warn("virtual-network delete", e); } } @Override public boolean trash(Network network, NetworkOffering offering) { // TODO Auto-generated method stub - s_logger.debug("NetworkGuru trash"); + logger.debug("NetworkGuru trash"); return true; } @Override public void updateNetworkProfile(NetworkProfile networkProfile) { // TODO Auto-generated method stub - 
s_logger.debug("NetworkGuru updateNetworkProfile"); + logger.debug("NetworkGuru updateNetworkProfile"); } @Override diff --git a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailManagerImpl.java b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailManagerImpl.java index cc87aa5cee1..b5abd7600be 100644 --- a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailManagerImpl.java +++ b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailManagerImpl.java @@ -50,7 +50,6 @@ import org.apache.cloudstack.network.contrail.model.ModelController; import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.ConfigurationService; @@ -145,7 +144,6 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager @Inject NetworkACLDao _networkAclDao; - private static final Logger s_logger = Logger.getLogger(ContrailManager.class); private ApiConnector _api; @@ -175,8 +173,8 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager try { _dbSyncTimer.schedule(new DBSyncTask(), 0, _dbSyncInterval); } catch (Exception ex) { - s_logger.debug("Unable to start DB Sync timer " + ex.getMessage()); - s_logger.debug("timer start", ex); + logger.debug("Unable to start DB Sync timer " + ex.getMessage()); + logger.debug("timer start", ex); } return true; } @@ -340,10 +338,10 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager } _api = ApiConnectorFactory.build(hostname, port); } catch (IOException ex) { - s_logger.warn("Unable to read " + configuration, ex); + logger.warn("Unable to read " + configuration, 
ex); throw new ConfigurationException(); } catch (Exception ex) { - s_logger.debug("Exception in configure: " + ex); + logger.debug("Exception in configure: " + ex); ex.printStackTrace(); throw new ConfigurationException(); } finally { @@ -360,7 +358,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager Provider.JuniperContrailVpcRouter); _vpcOffering = locateVpcOffering(); }catch (Exception ex) { - s_logger.debug("Exception in locating network offerings: " + ex); + logger.debug("Exception in locating network offerings: " + ex); ex.printStackTrace(); throw new ConfigurationException(); } @@ -524,12 +522,12 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager public void syncNetworkDB(short syncMode) throws IOException { if (_dbSync.syncAll(syncMode) == ServerDBSync.SYNC_STATE_OUT_OF_SYNC) { if (syncMode == DBSyncGeneric.SYNC_MODE_CHECK) { - s_logger.info("# Cloudstack DB & VNC are out of sync #"); + logger.info("# Cloudstack DB & VNC are out of sync #"); } else { - s_logger.info("# Cloudstack DB & VNC were out of sync, performed re-sync operation #"); + logger.info("# Cloudstack DB & VNC were out of sync, performed re-sync operation #"); } } else { - s_logger.info("# Cloudstack DB & VNC are in sync #"); + logger.info("# Cloudstack DB & VNC are in sync #"); } } @@ -539,13 +537,13 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager @Override public void run() { try { - s_logger.debug("DB Sync task is running"); + logger.debug("DB Sync task is running"); syncNetworkDB(_syncMode); // Change to check mode _syncMode = DBSyncGeneric.SYNC_MODE_CHECK; } catch (Exception ex) { - s_logger.debug(ex); - s_logger.info("Unable to sync network db"); + logger.debug(ex); + logger.info("Unable to sync network db"); } } } @@ -596,7 +594,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager sc.setParameters("trafficType", types.toArray()); List dbNets = 
_networksDao.search(sc, null); if (dbNets == null) { - s_logger.debug("no system networks for the given traffic types: " + types.toString()); + logger.debug("no system networks for the given traffic types: " + types.toString()); dbNets = new ArrayList(); } @@ -671,7 +669,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager List dbNets = _networksDao.search(sc, null); if (dbNets == null) { - s_logger.debug("no juniper managed networks for the given traffic types: " + types.toString()); + logger.debug("no juniper managed networks for the given traffic types: " + types.toString()); dbNets = new ArrayList(); } @@ -713,7 +711,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager sc.setParameters("vpcOffering", getVpcOffering().getId()); List vpcs = _vpcDao.search(sc, null); if (vpcs == null || vpcs.size() == 0) { - s_logger.debug("no vpcs found"); + logger.debug("no vpcs found"); return null; } return vpcs; @@ -737,7 +735,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager sc.setParameters("vpcId", vpcIds.toArray()); List acls = _networkAclDao.search(sc, null); if (acls == null || acls.size() == 0) { - s_logger.debug("no acls found"); + logger.debug("no acls found"); return null; } /* only return if acl is associated to any network */ @@ -761,7 +759,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager List dbNets = findManagedNetworks(null); if (dbNets == null || dbNets.isEmpty()) { - s_logger.debug("Juniper managed networks is empty"); + logger.debug("Juniper managed networks is empty"); return null; } @@ -783,7 +781,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager List publicIps = _ipAddressDao.search(sc, null); if (publicIps == null) { - s_logger.debug("no public ips"); + logger.debug("no public ips"); return null; } @@ -808,7 +806,7 @@ public class ContrailManagerImpl extends ManagerBase implements 
ContrailManager vnModel.update(getModelController()); } } catch (Exception ex) { - s_logger.warn("virtual-network update: ", ex); + logger.warn("virtual-network update: ", ex); } getDatabase().getVirtualNetworks().add(vnModel); } @@ -923,7 +921,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager } getDatabase().getVirtualNetworks().add(vnModel); } catch (Exception ex) { - s_logger.warn("virtual-network update: ", ex); + logger.warn("virtual-network update: ", ex); } return vnModel; } @@ -943,7 +941,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager fipPoolModel.update(getModelController()); vnModel.setFipPoolModel(fipPoolModel); } catch (Exception ex) { - s_logger.warn("floating-ip-pool create: ", ex); + logger.warn("floating-ip-pool create: ", ex); return false; } } @@ -957,7 +955,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager try { fipModel.update(getModelController()); } catch (Exception ex) { - s_logger.warn("floating-ip create: ", ex); + logger.warn("floating-ip create: ", ex); return false; } } @@ -974,7 +972,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager try { fipModel.destroy(getModelController()); } catch (IOException ex) { - s_logger.warn("floating ip delete", ex); + logger.warn("floating ip delete", ex); return false; } fipPoolModel.removeSuccessor(fipModel); @@ -998,7 +996,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager try { fipPool = (FloatingIpPool)_api.findByFQN(FloatingIpPool.class, fipPoolName); } catch (Exception ex) { - s_logger.debug(ex); + logger.debug(ex); } if (fipPool == null) { return null; @@ -1008,7 +1006,7 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager try { return (List)_api.getObjects(FloatingIp.class, ips); } catch (IOException ex) { - s_logger.debug(ex); + logger.debug(ex); return null; } } diff --git 
a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailVpcElementImpl.java b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailVpcElementImpl.java index 4a72fcb6697..11c93a44072 100644 --- a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailVpcElementImpl.java +++ b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailVpcElementImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; import org.apache.cloudstack.network.contrail.model.NetworkPolicyModel; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DeployDestination; @@ -48,8 +47,6 @@ import com.cloud.vm.ReservationContext; @Component @Local(value = {NetworkACLServiceProvider.class, VpcProvider.class, ContrailElementImpl.class}) public class ContrailVpcElementImpl extends ContrailElementImpl implements NetworkACLServiceProvider, VpcProvider { - private static final Logger s_logger = - Logger.getLogger(ContrailElement.class); @Inject NetworkACLDao _networkACLDao; @@ -65,7 +62,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement implementVpc"); + logger.debug("NetworkElement implementVpc"); return true; } @@ -73,7 +70,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo public boolean shutdownVpc(Vpc vpc, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement shutdownVpc"); + logger.debug("NetworkElement 
shutdownVpc"); return true; } @@ -81,7 +78,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo public boolean createPrivateGateway(PrivateGateway gateway) throws ConcurrentOperationException, ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement createPrivateGateway"); + logger.debug("NetworkElement createPrivateGateway"); return false; } @@ -89,7 +86,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo public boolean deletePrivateGateway(PrivateGateway privateGateway) throws ConcurrentOperationException, ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement deletePrivateGateway"); + logger.debug("NetworkElement deletePrivateGateway"); return false; } @@ -97,7 +94,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo public boolean applyStaticRoutes(Vpc vpc, List routes) throws ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement applyStaticRoutes"); + logger.debug("NetworkElement applyStaticRoutes"); return true; } @@ -105,9 +102,9 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo public boolean applyNetworkACLs(Network net, List rules) throws ResourceUnavailableException { - s_logger.debug("NetworkElement applyNetworkACLs"); + logger.debug("NetworkElement applyNetworkACLs"); if (rules == null || rules.isEmpty()) { - s_logger.debug("no rules to apply"); + logger.debug("no rules to apply"); return true; } @@ -127,7 +124,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo project = _manager.getDefaultVncProject(); } } catch (IOException ex) { - s_logger.warn("read project", ex); + logger.warn("read project", ex); return false; } policyModel.setProject(project); @@ -145,7 +142,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo try { 
policyModel.build(_manager.getModelController(), rules); } catch (Exception e) { - s_logger.error(e); + logger.error(e); e.printStackTrace(); return false; } @@ -156,7 +153,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo } _manager.getDatabase().getNetworkPolicys().add(policyModel); } catch (Exception ex) { - s_logger.error("network-policy update: ", ex); + logger.error("network-policy update: ", ex); ex.printStackTrace(); return false; } @@ -192,7 +189,7 @@ public class ContrailVpcElementImpl extends ContrailElementImpl implements Netwo List rules) throws ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement applyACLItemsToPrivateGw"); + logger.debug("NetworkElement applyACLItemsToPrivateGw"); return true; } diff --git a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java index fd29ca9a7b5..fa50512d0a1 100644 --- a/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java +++ b/plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java @@ -28,7 +28,6 @@ import java.util.Properties; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DeploymentPlan; @@ -48,7 +47,6 @@ import com.cloud.utils.PropertiesUtil; */ @Component public class ManagementNetworkGuru extends ContrailGuru { - private static final Logger s_logger = Logger.getLogger(ManagementNetworkGuru.class); private static final TrafficType[] TrafficTypes = {TrafficType.Management}; private final String configuration = "contrail.properties"; @@ -71,7 +69,7 @@ public class ManagementNetworkGuru extends ContrailGuru 
{ } inputFile = new FileInputStream(configFile); } catch (FileNotFoundException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); throw new ConfigurationException(e.getMessage()); } @@ -79,14 +77,14 @@ public class ManagementNetworkGuru extends ContrailGuru { try { configProps.load(inputFile); } catch (IOException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); throw new ConfigurationException(e.getMessage()); } finally { closeAutoCloseable(inputFile, "error closing config file"); } _mgmtCidr = configProps.getProperty("management.cidr"); _mgmtGateway = configProps.getProperty("management.gateway"); - s_logger.info("Management network " + _mgmtCidr + " gateway: " + _mgmtGateway); + logger.info("Management network " + _mgmtCidr + " gateway: " + _mgmtGateway); return true; } @@ -123,7 +121,7 @@ public class ManagementNetworkGuru extends ContrailGuru { network.setCidr(_mgmtCidr); network.setGateway(_mgmtGateway); } - s_logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); + logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? 
"" : " subnet: " + network.getCidr())); return network; } diff --git a/plugins/network-elements/juniper-contrail/test/org/apache/cloudstack/network/contrail/management/MockAccountManager.java b/plugins/network-elements/juniper-contrail/test/org/apache/cloudstack/network/contrail/management/MockAccountManager.java index b5da6043352..357f8b956a8 100644 --- a/plugins/network-elements/juniper-contrail/test/org/apache/cloudstack/network/contrail/management/MockAccountManager.java +++ b/plugins/network-elements/juniper-contrail/test/org/apache/cloudstack/network/contrail/management/MockAccountManager.java @@ -24,7 +24,6 @@ import java.net.InetAddress; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.RoleType; @@ -63,7 +62,6 @@ import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.TransactionStatus; public class MockAccountManager extends ManagerBase implements AccountManager { - private static final Logger s_logger = Logger.getLogger(MockAccountManager.class); @Inject AccountDao _accountDao; @@ -88,7 +86,7 @@ public class MockAccountManager extends ManagerBase implements AccountManager { throw new ConfigurationException("Unable to find the system user using " + User.UID_SYSTEM); } CallContext.register(_systemUser, _systemAccount); - s_logger.info("MockAccountManager initialization successful"); + logger.info("MockAccountManager initialization successful"); return true; } diff --git a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java index cb31ea440a1..fb1ed79c2eb 100644 --- a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java +++ 
b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java @@ -25,7 +25,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.response.ExternalFirewallResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -94,7 +93,6 @@ import com.cloud.vm.VirtualMachineProfile; public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceManagerImpl implements SourceNatServiceProvider, FirewallServiceProvider, PortForwardingServiceProvider, IpDeployer, JuniperSRXFirewallElementService, StaticNatServiceProvider { - private static final Logger s_logger = Logger.getLogger(JuniperSRXExternalFirewallElement.class); private static final Map> capabilities = setCapabilities(); @@ -131,18 +129,18 @@ public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceMan DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); if ((zone.getNetworkType() == NetworkType.Advanced && !(network.getGuestType() == Network.GuestType.Isolated || network.getGuestType() == Network.GuestType.Shared)) || (zone.getNetworkType() == NetworkType.Basic && network.getGuestType() != Network.GuestType.Shared)) { - s_logger.trace("Element " + getProvider().getName() + "is not handling network type = " + network.getGuestType()); + logger.trace("Element " + getProvider().getName() + "is not handling network type = " + network.getGuestType()); return false; } if (service == null) { if (!_networkManager.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); + logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); return false; } } else { if (!_networkManager.isProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - 
s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } } @@ -157,7 +155,7 @@ public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceMan // don't have to implement network is Basic zone if (zone.getNetworkType() == NetworkType.Basic) { - s_logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); + logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); return false; } @@ -170,7 +168,7 @@ public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceMan } catch (InsufficientCapacityException capacityException) { // TODO: handle out of capacity exception in more gracefule manner when multiple providers are present for // the network - s_logger.error("Fail to implement the JuniperSRX for network " + network, capacityException); + logger.error("Fail to implement the JuniperSRX for network " + network, capacityException); return false; } } @@ -192,7 +190,7 @@ public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceMan // don't have to implement network is Basic zone if (zone.getNetworkType() == NetworkType.Basic) { - s_logger.debug("Not handling network shutdown in zone of type " + NetworkType.Basic); + logger.debug("Not handling network shutdown in zone of type " + NetworkType.Basic); return false; } @@ -527,7 +525,7 @@ public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceMan @Override public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.Firewall)) { - s_logger.warn("SRX must be used as Firewall Service Provider in the network"); + logger.warn("SRX must be used as Firewall Service Provider in the network"); return false; } return true; diff --git 
a/plugins/network-elements/midonet/src/com/cloud/network/element/MidoNetElement.java b/plugins/network-elements/midonet/src/com/cloud/network/element/MidoNetElement.java index e1d93303248..890d0d03637 100644 --- a/plugins/network-elements/midonet/src/com/cloud/network/element/MidoNetElement.java +++ b/plugins/network-elements/midonet/src/com/cloud/network/element/MidoNetElement.java @@ -32,7 +32,6 @@ import javax.naming.ConfigurationException; import javax.ws.rs.core.MultivaluedMap; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.midonet.client.MidonetApi; import org.midonet.client.dto.DtoRule; import org.midonet.client.dto.DtoRule.DtoRange; @@ -92,7 +91,6 @@ import com.sun.jersey.core.util.MultivaluedMapImpl; public class MidoNetElement extends AdapterBase implements ConnectivityProvider, DhcpServiceProvider, SourceNatServiceProvider, StaticNatServiceProvider, IpDeployer, PortForwardingServiceProvider, FirewallServiceProvider, PluggableService { - private static final Logger s_logger = Logger.getLogger(MidoNetElement.class); private static final Map> capabilities = setCapabilities(); @@ -147,7 +145,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, } if (this.api == null) { - s_logger.info("midonet API server address is " + value); + logger.info("midonet API server address is " + value); setMidonetApi(new MidonetApi(value)); this.api.enableLogging(); } @@ -177,12 +175,12 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, if (service == null) { if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); + logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); return false; } } else { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), service, 
getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } } @@ -287,7 +285,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, public boolean associatePublicIP(Network network, final List ipAddress) throws ResourceUnavailableException { - s_logger.debug("associatePublicIP called with network: " + network.toString()); + logger.debug("associatePublicIP called with network: " + network.toString()); /* * Get Mido Router for this network and set source rules * These should only be allocated inside the for loop, because @@ -352,7 +350,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, @Override public boolean applyIps(Network network, List ipAddress, Set services) throws ResourceUnavailableException { - s_logger.debug("applyIps called with network: " + network.toString()); + logger.debug("applyIps called with network: " + network.toString()); if (!this.midoInNetwork(network)) { return false; } @@ -376,7 +374,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, */ @Override public IpDeployer getIpDeployer(Network network) { - s_logger.debug("getIpDeployer called with network " + network.toString()); + logger.debug("getIpDeployer called with network " + network.toString()); return this; } @@ -387,7 +385,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, public boolean addDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { - s_logger.debug("addDhcpEntry called with network: " + network.toString() + " nic: " + nic.toString() + " vm: 
" + vm.toString()); + logger.debug("addDhcpEntry called with network: " + network.toString() + " nic: " + nic.toString() + " vm: " + vm.toString()); if (!this.midoInNetwork(network)) { return false; } @@ -421,7 +419,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, // On DHCP subnet, add host using host details if (sub == null) { - s_logger.error("Failed to create DHCP subnet on Midonet bridge"); + logger.error("Failed to create DHCP subnet on Midonet bridge"); return false; } else { // Check if the host already exists - we may just be restarting an existing VM @@ -580,7 +578,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, */ @Override public boolean applyStaticNats(Network network, List rules) throws ResourceUnavailableException { - s_logger.debug("applyStaticNats called with network: " + network.toString()); + logger.debug("applyStaticNats called with network: " + network.toString()); if (!midoInNetwork(network)) { return false; } @@ -728,7 +726,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("implement called with network: " + network.toString()); + logger.debug("implement called with network: " + network.toString()); if (!midoInNetwork(network)) { return false; } @@ -748,7 +746,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, @Override public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("prepare called with network: " + network.toString() + " nic: " + nic.toString() + " vm: " + 
vm.toString()); + logger.debug("prepare called with network: " + network.toString() + " nic: " + nic.toString() + " vm: " + vm.toString()); if (!midoInNetwork(network)) { return false; } @@ -808,7 +806,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, @Override public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("release called with network: " + network.toString() + " nic: " + nic.toString() + " vm: " + vm.toString()); + logger.debug("release called with network: " + network.toString() + " nic: " + nic.toString() + " vm: " + vm.toString()); if (!midoInNetwork(network)) { return false; } @@ -848,7 +846,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, @Override public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("shutdown called with network: " + network.toString()); + logger.debug("shutdown called with network: " + network.toString()); if (!midoInNetwork(network)) { return false; } @@ -864,7 +862,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, @Override public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("destroy called with network: " + network.toString()); + logger.debug("destroy called with network: " + network.toString()); if (!midoInNetwork(network)) { return false; } @@ -913,7 +911,7 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, @Override public boolean applyPFRules(Network network, List rules) throws ResourceUnavailableException { - s_logger.debug("applyPFRules called with network " + network.toString()); + logger.debug("applyPFRules called with network " + 
network.toString()); if (!midoInNetwork(network)) { return false; } @@ -1410,11 +1408,11 @@ public class MidoNetElement extends AdapterBase implements ConnectivityProvider, String networkUUIDStr = String.valueOf(networkID); - s_logger.debug("Attempting to create guest network bridge"); + logger.debug("Attempting to create guest network bridge"); try { netBridge = api.addBridge().tenantId(accountUuid).name(networkUUIDStr).create(); } catch (HttpInternalServerError ex) { - s_logger.warn("Bridge creation failed, retrying bridge get in case it now exists.", ex); + logger.warn("Bridge creation failed, retrying bridge get in case it now exists.", ex); netBridge = getNetworkBridge(networkID, accountUuid); } } diff --git a/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetGuestNetworkGuru.java b/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetGuestNetworkGuru.java index aaf2fca51ba..024b5eda1d6 100644 --- a/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetGuestNetworkGuru.java +++ b/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetGuestNetworkGuru.java @@ -22,7 +22,6 @@ package com.cloud.network.guru; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenter.NetworkType; @@ -47,7 +46,6 @@ import com.cloud.vm.VirtualMachineProfile; @Component @Local(value = NetworkGuru.class) public class MidoNetGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(MidoNetGuestNetworkGuru.class); @Inject AccountDao _accountDao; @@ -64,7 +62,7 @@ public class MidoNetGuestNetworkGuru extends GuestNetworkGuru { isMyIsolationMethod(physicalNetwork)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + Network.GuestType.Isolated + " in zone of type " + NetworkType.Advanced + + logger.trace("We only take care of Guest networks of type " 
+ Network.GuestType.Isolated + " in zone of type " + NetworkType.Advanced + " using isolation method MIDO."); return false; } @@ -72,15 +70,15 @@ public class MidoNetGuestNetworkGuru extends GuestNetworkGuru { @Override public Network design(NetworkOffering offering, DeploymentPlan plan, Network userSpecified, Account owner) { - s_logger.debug("design called"); + logger.debug("design called"); // Check if the isolation type of the related physical network is MIDO PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); if (physnet == null || physnet.getIsolationMethods() == null || !physnet.getIsolationMethods().contains("MIDO")) { - s_logger.debug("Refusing to design this network, the physical isolation type is not MIDO"); + logger.debug("Refusing to design this network, the physical isolation type is not MIDO"); return null; } - s_logger.debug("Physical isolation type is MIDO, asking GuestNetworkGuru to design this network"); + logger.debug("Physical isolation type is MIDO, asking GuestNetworkGuru to design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, owner); if (networkObject == null) { return null; @@ -95,7 +93,7 @@ public class MidoNetGuestNetworkGuru extends GuestNetworkGuru { public Network implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException { assert (network.getState() == Network.State.Implementing) : "Why are we implementing " + network; - s_logger.debug("implement called network: " + network.toString()); + logger.debug("implement called network: " + network.toString()); long dcId = dest.getDataCenter().getId(); @@ -126,7 +124,7 @@ public class MidoNetGuestNetworkGuru extends GuestNetworkGuru { String broadcastUriStr = accountUUIDStr + "." 
+ String.valueOf(network.getId()) + ":" + routerName; implemented.setBroadcastUri(Networks.BroadcastDomainType.Mido.toUri(broadcastUriStr)); - s_logger.debug("Broadcast URI set to " + broadcastUriStr); + logger.debug("Broadcast URI set to " + broadcastUriStr); return implemented; } @@ -134,27 +132,27 @@ public class MidoNetGuestNetworkGuru extends GuestNetworkGuru { @Override public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { - s_logger.debug("reserve called with network: " + network.toString() + " nic: " + nic.toString() + " vm: " + vm.toString()); + logger.debug("reserve called with network: " + network.toString() + " nic: " + nic.toString() + " vm: " + vm.toString()); super.reserve(nic, network, vm, dest, context); } @Override public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservationId) { - s_logger.debug("release called with nic: " + nic.toString() + " vm: " + vm.toString()); + logger.debug("release called with nic: " + nic.toString() + " vm: " + vm.toString()); return super.release(nic, vm, reservationId); } @Override public void shutdown(NetworkProfile profile, NetworkOffering offering) { - s_logger.debug("shutdown called"); + logger.debug("shutdown called"); super.shutdown(profile, offering); } @Override public boolean trash(Network network, NetworkOffering offering) { - s_logger.debug("trash called with network: " + network.toString()); + logger.debug("trash called with network: " + network.toString()); return super.trash(network, offering); } diff --git a/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetPublicNetworkGuru.java b/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetPublicNetworkGuru.java index bb1b6f9b3de..078f95dc91e 100644 --- 
a/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetPublicNetworkGuru.java +++ b/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetPublicNetworkGuru.java @@ -23,7 +23,6 @@ import java.net.URI; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.Vlan; @@ -57,7 +56,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkGuru.class) public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { - private static final Logger s_logger = Logger.getLogger(MidoNetPublicNetworkGuru.class); // Inject any stuff we need to use (DAOs etc) @Inject @@ -72,7 +70,7 @@ public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { // Only change is to make broadcast domain type Mido @Override public Network design(NetworkOffering offering, DeploymentPlan plan, Network network, Account owner) { - s_logger.debug("design called with network: " + network); + logger.debug("design called with network: " + network); if (!canHandle(offering)) { return null; } @@ -121,7 +119,7 @@ public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { @Override public void updateNicProfile(NicProfile profile, Network network) { - s_logger.debug("updateNicProfile called with network: " + network + " profile: " + profile); + logger.debug("updateNicProfile called with network: " + network + " profile: " + profile); DataCenter dc = _dcDao.findById(network.getDataCenterId()); if (profile != null) { @@ -137,7 +135,7 @@ public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { if (nic == null) { nic = new NicProfile(Nic.ReservationStrategy.Create, null, null, null, null); } - s_logger.debug("allocate called with network: " + network + " nic: " + nic + " vm: " + vm); + logger.debug("allocate called with network: " + network + " nic: " + nic + " vm: " + vm); DataCenter dc = _dcDao.findById(network.getDataCenterId()); if (nic.getRequestedIPv4() != null) { @@ 
-162,7 +160,7 @@ public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { @Override public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - s_logger.debug("reserve called with network: " + network + " nic: " + nic + " vm: " + vm); + logger.debug("reserve called with network: " + network + " nic: " + nic + " vm: " + vm); if (nic.getIPv4Address() == null) { getIp(nic, dest.getDataCenter(), vm, network); } @@ -170,14 +168,14 @@ public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { @Override public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservationId) { - s_logger.debug("release called with nic: " + nic + " vm: " + vm); + logger.debug("release called with nic: " + nic + " vm: " + vm); return true; } @Override public Network implement(Network network, NetworkOffering offering, DeployDestination destination, ReservationContext context) throws InsufficientVirtualNetworkCapacityException { - s_logger.debug("implement called with network: " + network); + logger.debug("implement called with network: " + network); long dcId = destination.getDataCenter().getId(); //get physical network id @@ -204,9 +202,9 @@ public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { @Override @DB public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - s_logger.debug("deallocate called with network: " + network + " nic: " + nic + " vm: " + vm); - if (s_logger.isDebugEnabled()) { - s_logger.debug("public network deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + logger.debug("deallocate called with network: " + network + " nic: " + nic + " vm: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("public network deallocate network: networkId: " + nic.getNetworkId() + ", ip: " 
+ nic.getIPv4Address()); } final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); @@ -221,19 +219,19 @@ public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { } nic.deallocate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocated nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Deallocated nic: " + nic); } } @Override public void shutdown(NetworkProfile network, NetworkOffering offering) { - s_logger.debug("shutdown called with network: " + network); + logger.debug("shutdown called with network: " + network); } @Override public boolean trash(Network network, NetworkOffering offering) { - s_logger.debug("trash called with network: " + network); + logger.debug("trash called with network: " + network); return true; } diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index 9e679f61356..b25ba7ecabf 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -27,7 +27,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.google.gson.Gson; @@ -121,7 +120,6 @@ import com.cloud.vm.VirtualMachineProfile; public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl implements LoadBalancingServiceProvider, NetscalerLoadBalancerElementService, ExternalLoadBalancerDeviceManager, IpDeployer, StaticNatServiceProvider, GslbServiceProvider { - private static final Logger s_logger = Logger.getLogger(NetscalerElement.class); public static final AutoScaleCounterType AutoScaleCounterSnmp = new AutoScaleCounterType("snmp"); public static final AutoScaleCounterType AutoScaleCounterNetscaler = new AutoScaleCounterType("netscaler"); @@ -166,7 
+164,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl (zone.getNetworkType() == NetworkType.Basic && config.getGuestType() == Network.GuestType.Shared && config.getTrafficType() == TrafficType.Guest); if (!(handleInAdvanceZone || handleInBasicZone)) { - s_logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType() + " in zone of type " + + logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType() + " in zone of type " + zone.getNetworkType()); return false; } @@ -189,7 +187,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl } if (_ntwkSrvcDao.canProviderSupportServiceInNetwork(guestConfig.getId(), Service.StaticNat, Network.Provider.Netscaler) && !isBasicZoneNetwok(guestConfig)) { - s_logger.error("NetScaler provider can not be Static Nat service provider for the network " + guestConfig.getGuestType() + " and traffic type " + + logger.error("NetScaler provider can not be Static Nat service provider for the network " + guestConfig.getGuestType() + " and traffic type " + guestConfig.getTrafficType()); return false; } @@ -346,7 +344,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl uri = new URI(cmd.getUrl()); } catch (Exception e) { String msg = "Error parsing the url parameter specified in addNetscalerLoadBalancer command due to " + e.getMessage(); - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } Map configParams = new HashMap(); @@ -357,7 +355,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl if (dedicatedUse && !deviceName.equals(NetworkDevice.NetscalerVPXLoadBalancer.getName())) { String msg = "Only Netscaler VPX load balancers can be specified for dedicated use"; - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } @@ 
-365,13 +363,13 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl if (!deviceName.equals(NetworkDevice.NetscalerVPXLoadBalancer.getName()) && !deviceName.equals(NetworkDevice.NetscalerMPXLoadBalancer.getName())) { String msg = "Only Netscaler VPX or MPX load balancers can be specified as GSLB service provider"; - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } if (cmd.getSitePublicIp() == null || cmd.getSitePrivateIp() == null) { String msg = "Public and Privae IP needs to provided for NetScaler that will be GSLB provider"; - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } @@ -676,15 +674,15 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl // NetScaler can only act as Lb and Static Nat service provider if (services != null && !services.isEmpty() && !netscalerServices.containsAll(services)) { - s_logger.warn("NetScaler network element can only support LB and Static NAT services and service combination " + services + " is not supported."); + logger.warn("NetScaler network element can only support LB and Static NAT services and service combination " + services + " is not supported."); StringBuffer buff = new StringBuffer(); for (Service service : services) { buff.append(service.getName()); buff.append(" "); } - s_logger.warn("NetScaler network element can only support LB and Static NAT services and service combination " + buff.toString() + " is not supported."); - s_logger.warn("NetScaler network element can only support LB and Static NAT services and service combination " + services + " is not supported."); + logger.warn("NetScaler network element can only support LB and Static NAT services and service combination " + buff.toString() + " is not supported."); + logger.warn("NetScaler network element can only support LB and Static NAT services and service combination " + services + " is not supported."); return 
false; } @@ -720,14 +718,14 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl lbDeviceVO = allocateLoadBalancerForNetwork(network); } catch (Exception e) { errMsg = "Could not allocate a NetSclaer load balancer for configuring elastic load balancer rules due to " + e.getMessage(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } } if (!isNetscalerDevice(lbDeviceVO.getDeviceName())) { errMsg = "There are no NetScaler load balancer assigned for this network. So NetScaler element can not be handle elastic load balancer rules."; - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } @@ -765,7 +763,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl String msg = "Unable to apply elastic load balancer rules to the external load balancer appliance in zone " + network.getDataCenterId() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } @@ -791,14 +789,14 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl lbDevice = allocateLoadBalancerForNetwork(config); } catch (Exception e) { errMsg = "Could not allocate a NetSclaer load balancer for configuring static NAT rules due to" + e.getMessage(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } } if (!isNetscalerDevice(lbDevice.getDeviceName())) { errMsg = "There are no NetScaler load balancer assigned for this network. 
So NetScaler element will not be handling the static nat rules."; - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } SetStaticNatRulesAnswer answer = null; @@ -827,7 +825,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl ExternalLoadBalancerDeviceVO lbDevice = getNetScalerForEIP(rule); if (lbDevice == null) { String errMsg = "There is no NetScaler device configured to perform EIP to guest IP address: " + rule.getDestIpAddress(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } @@ -842,7 +840,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl SetStaticNatRulesAnswer answer = (SetStaticNatRulesAnswer)_agentMgr.send(lbDevice.getHostId(), cmd); if (answer == null) { String errMsg = "Failed to configure INAT rule on NetScaler device " + lbDevice.getHostId(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } } @@ -851,7 +849,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl } return true; } catch (Exception e) { - s_logger.error("Failed to configure StaticNat rule due to " + e.getMessage()); + logger.error("Failed to configure StaticNat rule due to " + e.getMessage()); return false; } } @@ -887,13 +885,13 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); if (lbDeviceVO == null) { - s_logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); + logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); return null; } if 
(!isNetscalerDevice(lbDeviceVO.getDeviceName())) { errMsg = "There are no NetScaler load balancer assigned for this network. So NetScaler element can not be handle elastic load balancer rules."; - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } @@ -939,10 +937,10 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl return getLBHealthChecks(network, lbrules); } } catch (ResourceUnavailableException e) { - s_logger.error("Error in getting the LB Rules from NetScaler " + e); + logger.error("Error in getting the LB Rules from NetScaler " + e); } } else { - s_logger.error("Network cannot handle to LB service "); + logger.error("Network cannot handle to LB service "); } return null; } @@ -961,7 +959,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId, physicalNetworkId); if (nsGslbProvider == null) { String msg = "Unable to find a NetScaler configured as gslb service provider in zone " + zoneId; - s_logger.debug(msg); + logger.debug(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zoneId); } @@ -972,7 +970,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl Answer answer = _agentMgr.easySend(zoneGslbProviderHosId, gslbConfigCmd); if (answer == null || !answer.getResult()) { String msg = "Unable to apply global load balancer rule to the gslb service provider in zone " + zoneId; - s_logger.debug(msg); + logger.debug(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zoneId); } @@ -1031,7 +1029,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl if (schemeCaps != null) { for (LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + 
this.getName()); + logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + this.getName()); return false; } } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java index 0243e2d4052..d59d4e16fc8 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java @@ -32,7 +32,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -137,7 +136,6 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { private static final int MAX_PORT = 65535; private static final int MIN_PORT = 0; - private static final Logger s_logger = Logger.getLogger(NiciraNvpElement.class); private static final Map> capabilities = setCapabilities(); @@ -187,18 +185,18 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { } protected boolean canHandle(Network network, Service service) { - s_logger.debug("Checking if NiciraNvpElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug("Checking if NiciraNvpElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Lswitch) { return false; } if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("NiciraNvpElement is not a provider for network " + network.getDisplayText()); + logger.debug("NiciraNvpElement is not a provider for network " + network.getDisplayText()); return false; } if 
(!ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.NiciraNvp)) { - s_logger.debug("NiciraNvpElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug("NiciraNvpElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); return false; } @@ -215,20 +213,20 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("entering NiciraNvpElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug("entering NiciraNvpElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); if (!canHandle(network, Service.Connectivity)) { return false; } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); + logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); return false; } List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -244,7 +242,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { // Implement SourceNat immediately as we have al the info already if (networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.NiciraNvp)) { - s_logger.debug("Apparently we are supposed to provide SourceNat on this network"); + logger.debug("Apparently we are supposed to 
provide SourceNat on this network"); PublicIp sourceNatIp = ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network); String publicCidr = sourceNatIp.getAddress().addr() + "/" + NetUtils.getCidrSize(sourceNatIp.getVlanNetmask()); @@ -270,7 +268,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { context.getAccount().getAccountName()); CreateLogicalRouterAnswer answer = (CreateLogicalRouterAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - s_logger.error("Failed to create Logical Router for network " + network.getDisplayText()); + logger.error("Failed to create Logical Router for network " + network.getDisplayText()); return false; } @@ -291,7 +289,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); + logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); return false; } @@ -299,7 +297,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -311,14 +309,14 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { FindLogicalSwitchPortAnswer answer = (FindLogicalSwitchPortAnswer)agentMgr.easySend(niciraNvpHost.getId(), findCmd); if (answer.getResult()) { - s_logger.warn("Existing Logical Switchport found for nic " + nic.getName() + " with uuid " + existingNicMap.getLogicalSwitchPortUuid()); + logger.warn("Existing Logical Switchport found for nic " + nic.getName() + " with uuid " + existingNicMap.getLogicalSwitchPortUuid()); UpdateLogicalSwitchPortCommand cmd = new 
UpdateLogicalSwitchPortCommand(existingNicMap.getLogicalSwitchPortUuid(), BroadcastDomainType.getValue(network.getBroadcastUri()), nicVO.getUuid(), context.getDomain().getName() + "-" + context.getAccount().getAccountName(), nic.getName()); agentMgr.easySend(niciraNvpHost.getId(), cmd); return true; } else { - s_logger.error("Stale entry found for nic " + nic.getName() + " with logical switchport uuid " + existingNicMap.getLogicalSwitchPortUuid()); + logger.error("Stale entry found for nic " + nic.getName() + " with logical switchport uuid " + existingNicMap.getLogicalSwitchPortUuid()); niciraNvpNicMappingDao.remove(existingNicMap.getId()); } } @@ -329,7 +327,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { CreateLogicalSwitchPortAnswer answer = (CreateLogicalSwitchPortAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("CreateLogicalSwitchPortCommand failed"); + logger.error("CreateLogicalSwitchPortCommand failed"); return false; } @@ -349,7 +347,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); + logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); return false; } @@ -357,7 +355,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -365,7 +363,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { NiciraNvpNicMappingVO nicMap = niciraNvpNicMappingDao.findByNicUuid(nicVO.getUuid()); if (nicMap == null) { - s_logger.error("No mapping for nic " + nic.getName()); + 
logger.error("No mapping for nic " + nic.getName()); return false; } @@ -373,7 +371,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { DeleteLogicalSwitchPortAnswer answer = (DeleteLogicalSwitchPortAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DeleteLogicalSwitchPortCommand failed"); + logger.error("DeleteLogicalSwitchPortCommand failed"); return false; } @@ -390,20 +388,20 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); HostVO niciraNvpHost = hostDao.findById(niciraNvpDevice.getHostId()); if (networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.NiciraNvp)) { - s_logger.debug("Apparently we were providing SourceNat on this network"); + logger.debug("Apparently we were providing SourceNat on this network"); // Deleting the LogicalRouter will also take care of all provisioned // nat rules. NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - s_logger.warn("No logical router uuid found for network " + network.getDisplayText()); + logger.warn("No logical router uuid found for network " + network.getDisplayText()); // This might be cause by a failed deployment, so don't make shutdown fail as well. 
return true; } @@ -411,7 +409,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { DeleteLogicalRouterCommand cmd = new DeleteLogicalRouterCommand(routermapping.getLogicalRouterUuid()); DeleteLogicalRouterAnswer answer = (DeleteLogicalRouterAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - s_logger.error("Failed to delete LogicalRouter for network " + network.getDisplayText()); + logger.error("Failed to delete LogicalRouter for network " + network.getDisplayText()); return false; } @@ -452,11 +450,11 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { // This element can only function in a Nicra Nvp based // SDN network, so Connectivity needs to be present here if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to provide services without Connectivity service enabled for this element"); + logger.warn("Unable to provide services without Connectivity service enabled for this element"); return false; } if ((services.contains(Service.PortForwarding) || services.contains(Service.StaticNat)) && !services.contains(Service.SourceNat)) { - s_logger.warn("Unable to provide StaticNat and/or PortForwarding without the SourceNat service"); + logger.warn("Unable to provide StaticNat and/or PortForwarding without the SourceNat service"); return false; } return true; @@ -734,7 +732,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { // SourceNat is required for StaticNat and PortForwarding List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -743,7 +741,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { NiciraNvpRouterMappingVO routermapping = 
niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - s_logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error("No logical router uuid found for network " + network.getDisplayText()); return false; } @@ -762,7 +760,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { //FIXME answer can be null if the host is down return answer.getResult(); } else { - s_logger.debug("No need to provision ip addresses as we are not providing L3 services."); + logger.debug("No need to provision ip addresses as we are not providing L3 services."); } return true; @@ -779,7 +777,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -787,7 +785,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - s_logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error("No logical router uuid found for network " + network.getDisplayText()); return false; } @@ -819,7 +817,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -827,7 +825,7 @@ NiciraNvpElementService, 
ResourceStateAdapter, IpDeployer { NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - s_logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error("No logical router uuid found for network " + network.getDisplayText()); return false; } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java index 975ebd19aa0..bfce33238bc 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java @@ -26,7 +26,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.CreateLogicalSwitchAnswer; @@ -71,7 +70,6 @@ import com.cloud.vm.VirtualMachineProfile; public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { private static final int MAX_NAME_LENGTH = 40; - private static final Logger s_logger = Logger.getLogger(NiciraNvpGuestNetworkGuru.class); @Inject protected NetworkModel networkModel; @@ -108,7 +106,7 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { && isMyIsolationMethod(physicalNetwork) && ntwkOfferingSrvcDao.areServicesSupportedByNetworkOffering(offering.getId(), Service.Connectivity)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -119,18 +117,18 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { final 
PhysicalNetworkVO physnet = physicalNetworkDao.findById(plan.getPhysicalNetworkId()); final DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } final List devices = niciraNvpDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + physnet.getName()); + logger.error("No NiciraNvp Controller on physical network " + physnet.getName()); return null; } - s_logger.debug("Nicira Nvp " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); + logger.debug("Nicira Nvp " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); - s_logger.debug("Physical isolation type is supported, asking GuestNetworkGuru to design this network"); + logger.debug("Physical isolation type is supported, asking GuestNetworkGuru to design this network"); final NetworkVO networkObject = (NetworkVO) super.design(offering, plan, userSpecified, owner); if (networkObject == null) { return null; @@ -176,7 +174,7 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { final List devices = niciraNvpDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + physicalNetworkId); + logger.error("No NiciraNvp Controller on physical network " + physicalNetworkId); return null; } final NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -190,16 +188,16 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { final CreateLogicalSwitchAnswer answer = (CreateLogicalSwitchAnswer) agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("CreateLogicalSwitchCommand failed"); + logger.error("CreateLogicalSwitchCommand failed"); return null; } try { 
implemented.setBroadcastUri(new URI("lswitch", answer.getLogicalSwitchUuid(), null)); implemented.setBroadcastDomainType(BroadcastDomainType.Lswitch); - s_logger.info("Implemented OK, network linked to = " + implemented.getBroadcastUri().toString()); + logger.info("Implemented OK, network linked to = " + implemented.getBroadcastUri().toString()); } catch (final URISyntaxException e) { - s_logger.error("Unable to store logical switch id in broadcast uri, uuid = " + implemented.getUuid(), e); + logger.error("Unable to store logical switch id in broadcast uri, uuid = " + implemented.getUuid(), e); return null; } @@ -221,13 +219,13 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { public void shutdown(final NetworkProfile profile, final NetworkOffering offering) { final NetworkVO networkObject = networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Lswitch || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } final List devices = niciraNvpDao.listByPhysicalNetwork(networkObject.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + networkObject.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + networkObject.getPhysicalNetworkId()); return; } final NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -237,7 +235,7 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { final DeleteLogicalSwitchAnswer answer = (DeleteLogicalSwitchAnswer) agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DeleteLogicalSwitchCommand failed"); + logger.error("DeleteLogicalSwitchCommand failed"); } super.shutdown(profile, offering); diff --git 
a/plugins/network-elements/nuage-vsp/src/com/cloud/network/element/NuageVspElement.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/element/NuageVspElement.java index e777268e424..c092706d5b1 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/element/NuageVspElement.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/element/NuageVspElement.java @@ -31,7 +31,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.network.ExternalNetworkDeviceManager; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.StartupCommand; @@ -93,7 +92,6 @@ import com.cloud.vm.dao.NicDao; public class NuageVspElement extends AdapterBase implements ConnectivityProvider, IpDeployer, SourceNatServiceProvider, StaticNatServiceProvider, FirewallServiceProvider, DhcpServiceProvider, NetworkACLServiceProvider, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(NuageVspElement.class); private static final Map> capabilities = setCapabilities(); @@ -194,14 +192,14 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("Entering NuageElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug("Entering NuageElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); if (!canHandle(network, Service.Connectivity)) { return false; } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the virtual router IP"); + logger.error("Nic has no broadcast Uri with the virtual router IP"); return 
false; } @@ -216,7 +214,7 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the virtual router IP"); + logger.error("Nic has no broadcast Uri with the virtual router IP"); return false; } @@ -230,7 +228,7 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the virtual router IP"); + logger.error("Nic has no broadcast Uri with the virtual router IP"); return false; } @@ -275,12 +273,12 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider // This element can only function in a NuageVsp based // SDN network, so Connectivity needs to be present here if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to support services combination without Connectivity service provided by Nuage VSP."); + logger.warn("Unable to support services combination without Connectivity service provided by Nuage VSP."); return false; } if ((services.contains(Service.StaticNat)) && (!services.contains(Service.SourceNat))) { - s_logger.warn("Unable to provide StaticNat without the SourceNat service."); + logger.warn("Unable to provide StaticNat without the SourceNat service."); return false; } @@ -289,7 +287,7 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider // NuageVsp doesn't implement any of these services, and we don't // want anyone else to do it for us. So if these services // exist, we can't handle it. - s_logger.warn("Unable to support services combination. The services list contains service(s) not supported by Nuage VSP."); + logger.warn("Unable to support services combination. 
The services list contains service(s) not supported by Nuage VSP."); return false; } @@ -303,13 +301,13 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("NuageElement is not a provider for network " + network.getDisplayText()); + logger.debug("NuageElement is not a provider for network " + network.getDisplayText()); return false; } if (service != null) { if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - s_logger.debug("NuageElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug("NuageElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); return false; } } @@ -373,13 +371,13 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider ApplyStaticNatVspCommand cmd = new ApplyStaticNatVspCommand(networkDomain.getUuid(), vpcOrSubnetUuid, isL3Network, sourceNatDetails); ApplyStaticNatVspAnswer answer = (ApplyStaticNatVspAnswer)_agentMgr.easySend(nuageVspHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("ApplyStaticNatNuageVspCommand for network " + config.getUuid() + " failed"); + logger.error("ApplyStaticNatNuageVspCommand for network " + config.getUuid() + " failed"); if ((null != answer) && (null != answer.getDetails())) { throw new ResourceUnavailableException(answer.getDetails(), Network.class, config.getId()); } } } catch (Exception e) { - s_logger.warn("Failed to apply static Nat in Vsp " + e.getMessage()); + logger.warn("Failed to apply static Nat in Vsp " + e.getMessage()); } } catch (Exception e) { throw new ResourceUnavailableException("Failed to apply Static NAT in VSP", Network.class, config.getId(), e); @@ -395,9 +393,9 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider @Override public 
boolean applyFWRules(Network network, List rules) throws ResourceUnavailableException { - s_logger.debug("Handling applyFWRules for network " + network.getName() + " with " + rules.size() + " FWRules"); + logger.debug("Handling applyFWRules for network " + network.getName() + " with " + rules.size() + " FWRules"); if (rules != null && rules.size() == 1 && rules.iterator().next().getType().equals(FirewallRuleType.System)) { - s_logger.debug("Default ACL added by CS as system is ignored for network " + network.getName() + " with rule " + rules); + logger.debug("Default ACL added by CS as system is ignored for network " + network.getName() + " with rule " + rules); return true; } return applyACLRules(network, rules, false); @@ -406,12 +404,12 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider @Override public boolean applyNetworkACLs(Network network, List rules) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - s_logger.debug("No rules to apply. So, delete all the existing ACL in VSP from Subnet with uuid " + network.getUuid()); + logger.debug("No rules to apply. So, delete all the existing ACL in VSP from Subnet with uuid " + network.getUuid()); } else { - s_logger.debug("New rules has to applied. So, delete all the existing ACL in VSP from Subnet with uuid " + network.getUuid()); + logger.debug("New rules has to applied. 
So, delete all the existing ACL in VSP from Subnet with uuid " + network.getUuid()); } if (rules != null) { - s_logger.debug("Handling applyNetworkACLs for network " + network.getName() + " with " + rules.size() + " Network ACLs"); + logger.debug("Handling applyNetworkACLs for network " + network.getName() + " with " + rules.size() + " Network ACLs"); applyACLRules(network, rules, true); } return true; @@ -440,7 +438,7 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider isVpc, network.getId()); ApplyAclRuleVspAnswer answer = (ApplyAclRuleVspAnswer)_agentMgr.easySend(nuageVspHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("ApplyAclRuleNuageVspCommand for network " + network.getUuid() + " failed"); + logger.error("ApplyAclRuleNuageVspCommand for network " + network.getUuid() + " failed"); if ((null != answer) && (null != answer.getDetails())) { throw new ResourceUnavailableException(answer.getDetails(), Network.class, network.getId()); } diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/network/guru/NuageVspGuestNetworkGuru.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/guru/NuageVspGuestNetworkGuru.java index 9bbaf409b4e..e611d025dbb 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/guru/NuageVspGuestNetworkGuru.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/guru/NuageVspGuestNetworkGuru.java @@ -31,7 +31,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.guru.DeallocateVmVspAnswer; @@ -85,7 +84,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkGuru.class) public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { - public static final Logger s_logger = Logger.getLogger(NuageVspGuestNetworkGuru.class); @Inject NetworkOfferingServiceMapDao _ntwkOfferingSrvcDao; @@ 
-114,7 +112,7 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } @@ -163,7 +161,7 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { AccountVO networksAccount = _accountDao.findById(network.getAccountId()); if (networksAccount.getType() == Account.ACCOUNT_TYPE_PROJECT) { String errorMessage = "CS project support is not yet implemented in NuageVsp"; - s_logger.debug(errorMessage); + logger.debug(errorMessage); throw new InsufficientVirtualNetworkCapacityException(errorMessage, Account.class, network.getAccountId()); } boolean isL3Network = isL3Network(offering.getId()); @@ -182,13 +180,13 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { ImplementNetworkVspAnswer answer = (ImplementNetworkVspAnswer)_agentMgr.easySend(nuageVspHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("ImplementNetworkNuageVspCommand failed"); + logger.error("ImplementNetworkNuageVspCommand failed"); if ((null != answer) && (null != answer.getDetails())) { - s_logger.error(answer.getDetails()); + logger.error(answer.getDetails()); } return null; } - s_logger.info("Implemented OK, network " + networkUuid + " in tenant " + tenantId + " linked to " + implemented.getBroadcastUri().toString()); + logger.info("Implemented OK, network " + networkUuid + " in tenant " + tenantId + " linked to " + implemented.getBroadcastUri().toString()); return implemented; } @@ -204,7 +202,7 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { nic.setBroadcastUri(network.getBroadcastUri()); nic.setIsolationUri(network.getBroadcastUri()); - s_logger.debug("Handling reserve() call back to with Create a new 
VM or add an interface to existing VM in network " + network.getName()); + logger.debug("Handling reserve() call back to with Create a new VM or add an interface to existing VM in network " + network.getName()); DataCenter dc = _dcDao.findById(network.getDataCenterId()); Account networksAccount = _accountDao.findById(network.getAccountId()); DomainVO networksDomain = _domainDao.findById(network.getDomainId()); @@ -233,9 +231,9 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { ReserveVmInterfaceVspAnswer answer = (ReserveVmInterfaceVspAnswer)_agentMgr.easySend(nuageVspHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("ReserveVmInterfaceNuageVspCommand failed"); + logger.error("ReserveVmInterfaceNuageVspCommand failed"); if ((null != answer) && (null != answer.getDetails())) { - s_logger.error(answer.getDetails()); + logger.error(answer.getDetails()); } throw new InsufficientVirtualNetworkCapacityException("Failed to reserve VM in Nuage VSP.", Network.class, network.getId()); } @@ -249,7 +247,7 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { && isMyIsolationMethod(physicalNetwork)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -258,7 +256,7 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservationId) { long networkId = nic.getNetworkId(); Network network = _networkDao.findById(networkId); - s_logger.debug("Handling release() call back, which is called when a VM is stopped or destroyed, to delete the VM with state " + vm.getVirtualMachine().getState() + logger.debug("Handling release() call back, which is called when a VM is stopped or destroyed, to 
delete the VM with state " + vm.getVirtualMachine().getState() + " from netork " + network.getName()); if (vm.getVirtualMachine().getState().equals(VirtualMachine.State.Stopping)) { try { @@ -266,16 +264,16 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { ReleaseVmVspCommand cmd = new ReleaseVmVspCommand(network.getUuid(), vm.getUuid(), vm.getInstanceName()); ReleaseVmVspAnswer answer = (ReleaseVmVspAnswer)_agentMgr.easySend(nuageVspHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("ReleaseVmNuageVspCommand for VM " + vm.getUuid() + " failed"); + logger.error("ReleaseVmNuageVspCommand for VM " + vm.getUuid() + " failed"); if ((null != answer) && (null != answer.getDetails())) { - s_logger.error(answer.getDetails()); + logger.error(answer.getDetails()); } } } catch (InsufficientVirtualNetworkCapacityException e) { - s_logger.debug("Handling release() call back. Failed to delete CS VM " + vm.getInstanceName() + " in VSP. " + e.getMessage()); + logger.debug("Handling release() call back. Failed to delete CS VM " + vm.getInstanceName() + " in VSP. " + e.getMessage()); } } else { - s_logger.debug("Handling release() call back. VM " + vm.getInstanceName() + " is in " + vm.getVirtualMachine().getState() + " state. So, the CS VM is not deleted." + logger.debug("Handling release() call back. VM " + vm.getInstanceName() + " is in " + vm.getVirtualMachine().getState() + " state. So, the CS VM is not deleted." + " This could be a case where VM interface is deleted. 
deallocate() call back should be called later"); } @@ -287,7 +285,7 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { try { - s_logger.debug("Handling deallocate() call back, which is called when a VM is destroyed or interface is removed, " + "to delete VM Interface with IP " + logger.debug("Handling deallocate() call back, which is called when a VM is destroyed or interface is removed, " + "to delete VM Interface with IP " + nic.getIPv4Address() + " from a VM " + vm.getInstanceName() + " with state " + vm.getVirtualMachine().getState()); DomainVO networksDomain = _domainDao.findById(network.getDomainId()); NicVO nicFrmDd = _nicDao.findById(nic.getId()); @@ -303,13 +301,13 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { isL3Network(networkOfferingId), vpcUuid, networksDomain.getUuid(), vm.getInstanceName(), vm.getUuid()); DeallocateVmVspAnswer answer = (DeallocateVmVspAnswer)_agentMgr.easySend(nuageVspHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DeallocateVmNuageVspCommand for VM " + vm.getUuid() + " failed"); + logger.error("DeallocateVmNuageVspCommand for VM " + vm.getUuid() + " failed"); if ((null != answer) && (null != answer.getDetails())) { - s_logger.error(answer.getDetails()); + logger.error(answer.getDetails()); } } } catch (InsufficientVirtualNetworkCapacityException e) { - s_logger.error("Handling deallocate(). VM " + vm.getInstanceName() + " with NIC IP " + nic.getIPv4Address() + logger.error("Handling deallocate(). VM " + vm.getInstanceName() + " with NIC IP " + nic.getIPv4Address() + " is getting destroyed. 
REST API failed to update the VM state in NuageVsp", e); } super.deallocate(network, nic, vm); @@ -323,7 +321,7 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { @Override public boolean trash(Network network, NetworkOffering offering) { - s_logger.debug("Handling trash() call back to delete the network " + network.getName() + " with uuid " + network.getUuid() + " from VSP"); + logger.debug("Handling trash() call back to delete the network " + network.getName() + " with uuid " + network.getUuid() + " from VSP"); long domainId = network.getDomainId(); Domain domain = _domainDao.findById(domainId); Long vpcId = network.getVpcId(); @@ -337,13 +335,13 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { TrashNetworkVspCommand cmd = new TrashNetworkVspCommand(domain.getUuid(), network.getUuid(), isL3Network(offering.getId()), vpcUuid); TrashNetworkVspAnswer answer = (TrashNetworkVspAnswer)_agentMgr.easySend(nuageVspHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("TrashNetworkNuageVspCommand for network " + network.getUuid() + " failed"); + logger.error("TrashNetworkNuageVspCommand for network " + network.getUuid() + " failed"); if ((null != answer) && (null != answer.getDetails())) { - s_logger.error(answer.getDetails()); + logger.error(answer.getDetails()); } } } catch (Exception e) { - s_logger.warn("Failed to clean up network information in Vsp " + e.getMessage()); + logger.warn("Failed to clean up network information in Vsp " + e.getMessage()); } return super.trash(network, offering); @@ -361,13 +359,13 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { Iterator ipIterator = allIPsInCidr.iterator(); long vip = ipIterator.next(); if (NetUtils.ip2Long(network.getGateway()) == vip) { - s_logger.debug("Gateway of the Network(" + network.getUuid() + ") has the first IP " + NetUtils.long2Ip(vip)); + logger.debug("Gateway of the Network(" + network.getUuid() + ") has the first IP " + 
NetUtils.long2Ip(vip)); vip = ipIterator.next(); virtualRouterIp = NetUtils.long2Ip(vip); - s_logger.debug("So, reserving the 2nd IP " + virtualRouterIp + " for the Virtual Router IP in Network(" + network.getUuid() + ")"); + logger.debug("So, reserving the 2nd IP " + virtualRouterIp + " for the Virtual Router IP in Network(" + network.getUuid() + ")"); } else { virtualRouterIp = NetUtils.long2Ip(vip); - s_logger.debug("1nd IP is not used as the gateway IP. So, reserving" + virtualRouterIp + " for the Virtual Router IP for " + "Network(" + network.getUuid() + ")"); + logger.debug("1nd IP is not used as the gateway IP. So, reserving" + virtualRouterIp + " for the Virtual Router IP for " + "Network(" + network.getUuid() + ")"); } addressRange.add(NetUtils.long2Ip(ipIterator.next())); addressRange.add(NetUtils.long2Ip((Long)allIPsInCidr.toArray()[allIPsInCidr.size() - 1])); @@ -390,7 +388,7 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru { } } } catch (Exception e) { - s_logger.error("Failed to parse the VM interface Json response from VSP REST API. VM interface json string is " + vmInterfacesDetails, e); + logger.error("Failed to parse the VM interface Json response from VSP REST API. VM interface json string is " + vmInterfacesDetails, e); throw new InsufficientVirtualNetworkCapacityException("Failed to parse the VM interface Json response from VSP REST API. VM interface Json " + "string is " + vmInterfacesDetails + ". So. 
failed to get IP for the VM from VSP address for network " + network, Network.class, network.getId()); } diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java index fed970ede5a..09320571b84 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java @@ -39,7 +39,6 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.ExternalNetworkDeviceManager; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; import com.cloud.api.ApiDBUtils; import com.cloud.api.commands.AddNuageVspDeviceCmd; @@ -82,7 +81,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Local(value = {NuageVspManager.class}) public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, Configurable { - private static final Logger s_logger = Logger.getLogger(NuageVspManagerImpl.class); private static final int ONE_MINUTE_MULTIPLIER = 60 * 1000; @@ -290,7 +288,7 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, try { initNuageScheduledTasks(); } catch (Exception ce) { - s_logger.warn("Failed to load NuageVsp configuration properties. Check if the NuageVsp properties are configured correctly"); + logger.warn("Failed to load NuageVsp configuration properties. 
Check if the NuageVsp properties are configured correctly"); } return true; } @@ -318,7 +316,7 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, scheduler .scheduleWithFixedDelay(new NuageVspSyncTask("ENTERPRISE"), ONE_MINUTE_MULTIPLIER * 15, ONE_MINUTE_MULTIPLIER * syncUpIntervalInMinutes, TimeUnit.MILLISECONDS); } else { - s_logger.warn("NuageVsp configuration for syncWorkers=" + numOfSyncThreads + " syncInterval=" + syncUpIntervalInMinutes + logger.warn("NuageVsp configuration for syncWorkers=" + numOfSyncThreads + " syncInterval=" + syncUpIntervalInMinutes + " could not be read properly. So, check if the properties are configured properly in global properties"); } } diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/network/resource/NuageVspResource.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/resource/NuageVspResource.java index 65c8ae96fa3..c4a3ff18eed 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/resource/NuageVspResource.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/resource/NuageVspResource.java @@ -32,7 +32,6 @@ import net.nuage.vsp.acs.client.NuageVspGuruClient; import net.nuage.vsp.acs.client.NuageVspSyncClient; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -70,7 +69,6 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.exception.CloudRuntimeException; public class NuageVspResource extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(NuageVspResource.class); private String _name; private String _guid; @@ -184,7 +182,7 @@ public class NuageVspResource extends ManagerBase implements ServerResource { try { login(); } catch (Exception e) { - s_logger.error("Failed to login to Nuage VSD on " + name + " as user " + cmsUser + " Exception " + e.getMessage()); + 
logger.error("Failed to login to Nuage VSD on " + name + " as user " + cmsUser + " Exception " + e.getMessage()); throw new CloudRuntimeException("Failed to login to Nuage VSD on " + name + " as user " + cmsUser, e); } @@ -219,7 +217,7 @@ public class NuageVspResource extends ManagerBase implements ServerResource { } catch (Exception e) { _isNuageVspClientLoaded = false; String errorMessage = "Nuage Vsp Plugin client is not yet installed. Please install NuageVsp plugin client to use NuageVsp plugin in Cloudstack. "; - s_logger.warn(errorMessage + e.getMessage()); + logger.warn(errorMessage + e.getMessage()); throw new Exception(errorMessage); } @@ -261,13 +259,13 @@ public class NuageVspResource extends ManagerBase implements ServerResource { @Override public PingCommand getCurrentStatus(long id) { if ((_relativePath == null) || (_relativePath.isEmpty()) || (_cmsUserInfo == null) || (_cmsUserInfo.length == 0)) { - s_logger.error("Failed to ping to Nuage VSD"); + logger.error("Failed to ping to Nuage VSD"); return null; } try { login(); } catch (Exception e) { - s_logger.error("Failed to ping to Nuage VSD on " + _name + " as user " + _cmsUserInfo[1] + " Exception " + e.getMessage()); + logger.error("Failed to ping to Nuage VSD on " + _name + " as user " + _cmsUserInfo[1] + " Exception " + e.getMessage()); return null; } return new PingCommand(Host.Type.L2Networking, id); @@ -306,7 +304,7 @@ public class NuageVspResource extends ManagerBase implements ServerResource { else if (cmd instanceof SyncVspCommand) { return executeRequest((SyncVspCommand)cmd); } - s_logger.debug("Received unsupported command " + cmd.toString()); + logger.debug("Received unsupported command " + cmd.toString()); return Answer.createUnsupportedCommandAnswer(cmd); } diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java 
b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java index 05bc8317090..ccb0b9d754f 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java @@ -28,7 +28,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.network.opendaylight.agent.commands.StartupOpenDaylightControllerCommand; @@ -62,7 +61,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = {NetworkElement.class, ConnectivityProvider.class}) public class OpendaylightElement extends AdapterBase implements ConnectivityProvider, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(OpendaylightElement.class); private static final Map> s_capabilities = setCapabilities(); @Inject diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java index cbaa4d024b3..35532496c68 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java @@ -24,7 +24,6 @@ import java.util.UUID; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.network.opendaylight.agent.commands.AddHypervisorCommand; import org.apache.cloudstack.network.opendaylight.agent.commands.ConfigureNetworkCommand; @@ -70,7 +69,6 @@ import 
com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachineProfile; public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(OpendaylightGuestNetworkGuru.class); @Inject protected NetworkOfferingServiceMapDao ntwkOfferingSrvcDao; @@ -96,7 +94,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { && ntwkOfferingSrvcDao.isProviderForNetworkOffering(offering.getId(), Provider.Opendaylight)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -106,17 +104,17 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { PhysicalNetworkVO physnet = physicalNetworkDao.findById(plan.getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + physnet.getName()); + logger.error("No Controller on physical network " + physnet.getName()); return null; } - s_logger.debug("Controller " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); - s_logger.debug("Physical isolation type is ODL, asking GuestNetworkGuru to design this network"); + logger.debug("Controller " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); + logger.debug("Physical isolation type is ODL, asking GuestNetworkGuru to design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, owner); if 
(networkObject == null) { @@ -161,7 +159,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + physicalNetworkId); + logger.error("No Controller on physical network " + physicalNetworkId); return null; } OpenDaylightControllerVO controller = devices.get(0); @@ -170,13 +168,13 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { ConfigureNetworkAnswer answer = (ConfigureNetworkAnswer)agentManager.easySend(controller.getHostId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("ConfigureNetworkCommand failed"); + logger.error("ConfigureNetworkCommand failed"); return null; } implemented.setBroadcastUri(BroadcastDomainType.OpenDaylight.toUri(answer.getNetworkUuid())); implemented.setBroadcastDomainType(BroadcastDomainType.OpenDaylight); - s_logger.info("Implemented OK, network linked to = " + implemented.getBroadcastUri().toString()); + logger.info("Implemented OK, network linked to = " + implemented.getBroadcastUri().toString()); return implemented; } @@ -191,7 +189,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + physicalNetworkId); + logger.error("No Controller on physical network " + physicalNetworkId); throw new InsufficientVirtualNetworkCapacityException("No OpenDaylight Controller configured for this network", dest.getPod().getId()); } OpenDaylightControllerVO controller = devices.get(0); @@ -199,7 +197,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { AddHypervisorCommand addCmd = new AddHypervisorCommand(dest.getHost().getUuid(), dest.getHost().getPrivateIpAddress()); AddHypervisorAnswer addAnswer = 
(AddHypervisorAnswer)agentManager.easySend(controller.getHostId(), addCmd); if (addAnswer == null || !addAnswer.getResult()) { - s_logger.error("Failed to add " + dest.getHost().getName() + " as a node to the controller"); + logger.error("Failed to add " + dest.getHost().getName() + " as a node to the controller"); throw new InsufficientVirtualNetworkCapacityException("Failed to add destination hypervisor to the OpenDaylight Controller", dest.getPod().getId()); } @@ -208,7 +206,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { ConfigurePortAnswer answer = (ConfigurePortAnswer)agentManager.easySend(controller.getHostId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("ConfigureNetworkCommand failed"); + logger.error("ConfigureNetworkCommand failed"); throw new InsufficientVirtualNetworkCapacityException("Failed to configure the port on the OpenDaylight Controller", dest.getPod().getId()); } @@ -225,7 +223,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + physicalNetworkId); + logger.error("No Controller on physical network " + physicalNetworkId); throw new CloudRuntimeException("No OpenDaylight controller on this physical network"); } OpenDaylightControllerVO controller = devices.get(0); @@ -234,7 +232,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { DestroyPortAnswer answer = (DestroyPortAnswer)agentManager.easySend(controller.getHostId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DestroyPortCommand failed"); + logger.error("DestroyPortCommand failed"); success = false; } } @@ -246,13 +244,13 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = 
networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.OpenDaylight || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(networkObject.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + networkObject.getPhysicalNetworkId()); + logger.error("No Controller on physical network " + networkObject.getPhysicalNetworkId()); return; } OpenDaylightControllerVO controller = devices.get(0); @@ -261,7 +259,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { DestroyNetworkAnswer answer = (DestroyNetworkAnswer)agentManager.easySend(controller.getHostId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DestroyNetworkCommand failed"); + logger.error("DestroyNetworkCommand failed"); } super.shutdown(profile, offering); diff --git a/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java b/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java index 5ce4d935389..66050eaf04e 100644 --- a/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java +++ b/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java @@ -28,7 +28,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.network.topology.NetworkTopology; import org.apache.cloudstack.network.topology.NetworkTopologyContext; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupOvsCommand; @@ -107,7 +106,6 @@ StaticNatServiceProvider, IpDeployer { @Inject NetworkTopologyContext _networkTopologyContext; - private static final Logger s_logger = 
Logger.getLogger(OvsElement.class); private static final Map> capabilities = setCapabilities(); @Override @@ -121,21 +119,21 @@ StaticNatServiceProvider, IpDeployer { } protected boolean canHandle(final Network network, final Service service) { - s_logger.debug("Checking if OvsElement can handle service " + logger.debug("Checking if OvsElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Vswitch) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("OvsElement is not a provider for network " + logger.debug("OvsElement is not a provider for network " + network.getDisplayText()); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.Ovs)) { - s_logger.debug("OvsElement can't provide the " + service.getName() + logger.debug("OvsElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); return false; } @@ -156,7 +154,7 @@ StaticNatServiceProvider, IpDeployer { final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("entering OvsElement implement function for network " + logger.debug("entering OvsElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); @@ -254,7 +252,7 @@ StaticNatServiceProvider, IpDeployer { @Override public boolean verifyServicesCombination(final Set services) { if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to provide services without Connectivity service enabled for this element"); + logger.warn("Unable to provide services without Connectivity service enabled for this element"); return false; } @@ -443,7 +441,7 @@ StaticNatServiceProvider, IpDeployer { List routers = 
_routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router element doesn't need to associate ip addresses on the backend; virtual " + logger.debug("Virtual router element doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; @@ -467,7 +465,7 @@ StaticNatServiceProvider, IpDeployer { List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Ovs element doesn't need to apply static nat on the backend; virtual " + logger.debug("Ovs element doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -487,7 +485,7 @@ StaticNatServiceProvider, IpDeployer { List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Ovs element doesn't need to apply firewall rules on the backend; virtual " + logger.debug("Ovs element doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -509,7 +507,7 @@ StaticNatServiceProvider, IpDeployer { List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; @@ -559,7 +557,7 @@ StaticNatServiceProvider, IpDeployer { if (schemeCaps != null) { for (LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + 
logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + getName()); return false; diff --git a/plugins/network-elements/ovs/src/com/cloud/network/guru/OvsGuestNetworkGuru.java b/plugins/network-elements/ovs/src/com/cloud/network/guru/OvsGuestNetworkGuru.java index be49757fc5f..ca5b0681bac 100644 --- a/plugins/network-elements/ovs/src/com/cloud/network/guru/OvsGuestNetworkGuru.java +++ b/plugins/network-elements/ovs/src/com/cloud/network/guru/OvsGuestNetworkGuru.java @@ -21,7 +21,6 @@ import com.cloud.network.vpc.dao.VpcDao; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.context.CallContext; @@ -56,8 +55,6 @@ import com.cloud.vm.VirtualMachineProfile; @Component @Local(value = NetworkGuru.class) public class OvsGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger - .getLogger(OvsGuestNetworkGuru.class); @Inject OvsTunnelManager _ovsTunnelMgr; @@ -85,7 +82,7 @@ public class OvsGuestNetworkGuru extends GuestNetworkGuru { offering.getId(), Service.Connectivity)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; @@ -100,7 +97,7 @@ public class OvsGuestNetworkGuru extends GuestNetworkGuru { .getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } NetworkVO config = (NetworkVO)super.design(offering, plan, @@ -135,7 +132,7 @@ public class OvsGuestNetworkGuru extends GuestNetworkGuru { .findById(physicalNetworkId); if (!canHandle(offering, nwType, physnet)) { - s_logger.debug("Refusing to design this 
network"); + logger.debug("Refusing to design this network"); return null; } NetworkVO implemented = (NetworkVO)super.implement(network, offering, @@ -184,13 +181,13 @@ public class OvsGuestNetworkGuru extends GuestNetworkGuru { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vswitch || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } if (profile.getBroadcastDomainType() == BroadcastDomainType.Vswitch ) { - s_logger.debug("Releasing vnet for the network id=" + profile.getId()); + logger.debug("Releasing vnet for the network id=" + profile.getId()); _dcDao.releaseVnet(BroadcastDomainType.getValue(profile.getBroadcastUri()), profile.getDataCenterId(), profile.getPhysicalNetworkId(), profile.getAccountId(), profile.getReservationId()); } diff --git a/plugins/network-elements/ovs/src/com/cloud/network/ovs/OvsTunnelManagerImpl.java b/plugins/network-elements/ovs/src/com/cloud/network/ovs/OvsTunnelManagerImpl.java index 9024abb1ad8..e9f066d742f 100644 --- a/plugins/network-elements/ovs/src/com/cloud/network/ovs/OvsTunnelManagerImpl.java +++ b/plugins/network-elements/ovs/src/com/cloud/network/ovs/OvsTunnelManagerImpl.java @@ -30,7 +30,6 @@ import javax.persistence.EntityExistsException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageSubscriber; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -94,7 +93,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Component @Local(value = {OvsTunnelManager.class}) public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManager, StateListener { - public 
static final Logger s_logger = Logger.getLogger(OvsTunnelManagerImpl.class.getName()); // boolean _isEnabled; ScheduledExecutorService _executorPool; @@ -161,13 +159,13 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage OvsTunnelInterfaceVO lock = _tunnelInterfaceDao .acquireInLockTable(Long.valueOf(1)); if (lock == null) { - s_logger.warn("Cannot lock table ovs_tunnel_account"); + logger.warn("Cannot lock table ovs_tunnel_account"); return null; } _tunnelInterfaceDao.persist(ti); _tunnelInterfaceDao.releaseFromLockTable(lock.getId()); } catch (EntityExistsException e) { - s_logger.debug("A record for the interface for network " + label + logger.debug("A record for the interface for network " + label + " on host id " + hostId + " already exists"); } return ti; @@ -183,7 +181,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage } } // Fetch interface failed! - s_logger.warn("Unable to fetch the IP address for the GRE tunnel endpoint" + logger.warn("Unable to fetch the IP address for the GRE tunnel endpoint" + ans.getDetails()); return null; } @@ -195,13 +193,13 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage ta = new OvsTunnelNetworkVO(from, to, key, networkId); OvsTunnelNetworkVO lock = _tunnelNetworkDao.acquireInLockTable(Long.valueOf(1)); if (lock == null) { - s_logger.warn("Cannot lock table ovs_tunnel_account"); + logger.warn("Cannot lock table ovs_tunnel_account"); return null; } _tunnelNetworkDao.persist(ta); _tunnelNetworkDao.releaseFromLockTable(lock.getId()); } catch (EntityExistsException e) { - s_logger.debug("A record for the tunnel from " + from + " to " + to + " already exists"); + logger.debug("A record for the tunnel from " + from + " to " + to + " already exists"); } return ta; } @@ -223,12 +221,12 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage } if (!r.getResult()) { 
tunnel.setState(OvsTunnel.State.Failed.name()); - s_logger.warn("Create GRE tunnel from " + from + " to " + to + " failed due to " + r.getDetails() + logger.warn("Create GRE tunnel from " + from + " to " + to + " failed due to " + r.getDetails() + s); } else { tunnel.setState(OvsTunnel.State.Established.name()); tunnel.setPortName(r.getInPortName()); - s_logger.info("Create GRE tunnel from " + from + " to " + to + " succeeded." + r.getDetails() + s); + logger.info("Create GRE tunnel from " + from + " to " + to + " succeeded." + r.getDetails() + s); } _tunnelNetworkDao.update(tunnel.getId(), tunnel); } @@ -273,7 +271,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage //for network with label on target host Commands fetchIfaceCmds = new Commands(new OvsFetchInterfaceCommand(physNetLabel)); - s_logger.debug("Ask host " + host.getId() + + logger.debug("Ask host " + host.getId() + " to retrieve interface for phy net with label:" + physNetLabel); Answer[] fetchIfaceAnswers = _agentMgr.send(host.getId(), fetchIfaceCmds); @@ -299,7 +297,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage return key; } catch (NumberFormatException e) { - s_logger.debug("Well well, how did '" + key + logger.debug("Well well, how did '" + key + "' end up in the broadcast URI for the network?"); throw new CloudRuntimeException(String.format( "Invalid GRE key parsed from" @@ -311,7 +309,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage @DB protected void checkAndCreateTunnel(Network nw, Host host) { - s_logger.debug("Creating tunnels with OVS tunnel manager"); + logger.debug("Creating tunnels with OVS tunnel manager"); long hostId = host.getId(); int key = getGreKey(nw); @@ -326,7 +324,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage OvsTunnelNetworkVO ta = _tunnelNetworkDao.getByFromToNetwork(hostId, rh.longValue(), nw.getId()); // Try and create the 
tunnel even if a previous attempt failed if (ta == null || ta.getState().equals(OvsTunnel.State.Failed.name())) { - s_logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); + logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); if (ta == null) { createTunnelRecord(hostId, rh.longValue(), nw.getId(), key); } @@ -339,7 +337,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage hostId, nw.getId()); // Try and create the tunnel even if a previous attempt failed if (ta == null || ta.getState().equals(OvsTunnel.State.Failed.name())) { - s_logger.debug("Attempting to create tunnel from:" + + logger.debug("Attempting to create tunnel from:" + rh.longValue() + " to:" + hostId); if (ta == null) { createTunnelRecord(rh.longValue(), hostId, @@ -367,8 +365,8 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage Commands cmds = new Commands( new OvsCreateTunnelCommand(otherIp, key, Long.valueOf(hostId), i, nw.getId(), myIp, bridgeName, nw.getUuid())); - s_logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + nw.getId()); - s_logger.debug("Ask host " + hostId + logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + nw.getId()); + logger.debug("Ask host " + hostId + " to create gre tunnel to " + i); Answer[] answers = _agentMgr.send(hostId, cmds); handleCreateTunnelAnswer(answers); @@ -380,7 +378,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage String otherIp = getGreEndpointIP(rHost, nw); Commands cmds = new Commands(new OvsCreateTunnelCommand(myIp, key, i, Long.valueOf(hostId), nw.getId(), otherIp, bridgeName, nw.getUuid())); - s_logger.debug("Ask host " + i + " to create gre tunnel to " + logger.debug("Ask host " + i + " to create gre tunnel to " + hostId); Answer[] answers = _agentMgr.send(i, cmds); handleCreateTunnelAnswer(answers); @@ 
-391,13 +389,13 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage // anyway. This will ensure VIF rules will be triggered if (noHost) { Commands cmds = new Commands(new OvsSetupBridgeCommand(bridgeName, hostId, nw.getId())); - s_logger.debug("Ask host " + hostId + " to configure bridge for network:" + nw.getId()); + logger.debug("Ask host " + hostId + " to configure bridge for network:" + nw.getId()); Answer[] answers = _agentMgr.send(hostId, cmds); handleSetupBridgeAnswer(answers); } } catch (GreTunnelException | OperationTimedoutException | AgentUnavailableException e) { // I really thing we should do a better handling of these exceptions - s_logger.warn("Ovs Tunnel network created tunnel failed", e); + logger.warn("Ovs Tunnel network created tunnel failed", e); } } @@ -427,7 +425,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage if (ans.getResult()) { OvsTunnelNetworkVO lock = _tunnelNetworkDao.acquireInLockTable(Long.valueOf(1)); if (lock == null) { - s_logger.warn(String.format("failed to lock" + + logger.warn(String.format("failed to lock" + "ovs_tunnel_account, remove record of " + "tunnel(from=%1$s, to=%2$s account=%3$s) failed", from, to, networkId)); @@ -437,11 +435,11 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage _tunnelNetworkDao.removeByFromToNetwork(from, to, networkId); _tunnelNetworkDao.releaseFromLockTable(lock.getId()); - s_logger.debug(String.format("Destroy tunnel(account:%1$s," + + logger.debug(String.format("Destroy tunnel(account:%1$s," + "from:%2$s, to:%3$s) successful", networkId, from, to)); } else { - s_logger.debug(String.format("Destroy tunnel(account:%1$s," + "from:%2$s, to:%3$s) failed", networkId, from, to)); + logger.debug(String.format("Destroy tunnel(account:%1$s," + "from:%2$s, to:%3$s) failed", networkId, from, to)); } } @@ -451,24 +449,24 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage 
if (ans.getResult()) { OvsTunnelNetworkVO lock = _tunnelNetworkDao.acquireInLockTable(Long.valueOf(1)); if (lock == null) { - s_logger.warn("failed to lock ovs_tunnel_network," + "remove record"); + logger.warn("failed to lock ovs_tunnel_network," + "remove record"); return; } _tunnelNetworkDao.removeByFromNetwork(hostId, networkId); _tunnelNetworkDao.releaseFromLockTable(lock.getId()); - s_logger.debug(String.format("Destroy bridge for" + + logger.debug(String.format("Destroy bridge for" + "network %1$s successful", networkId)); } else { - s_logger.debug(String.format("Destroy bridge for" + + logger.debug(String.format("Destroy bridge for" + "network %1$s failed", networkId)); } } private void handleSetupBridgeAnswer(Answer[] answers) { //TODO: Add some error management here? - s_logger.debug("Placeholder for something more meanginful to come"); + logger.debug("Placeholder for something more meanginful to come"); } @Override @@ -495,7 +493,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage if (p.getState().equals(OvsTunnel.State.Established.name())) { Command cmd= new OvsDestroyTunnelCommand(p.getNetworkId(), bridgeName, p.getPortName()); - s_logger.debug("Destroying tunnel to " + host.getId() + + logger.debug("Destroying tunnel to " + host.getId() + " from " + p.getFrom()); Answer ans = _agentMgr.send(p.getFrom(), cmd); handleDestroyTunnelAnswer(ans, p.getFrom(), p.getTo(), p.getNetworkId()); @@ -505,11 +503,11 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage Command cmd = new OvsDestroyBridgeCommand(nw.getId(), generateBridgeNameForVpc(nw.getVpcId()), host.getId()); - s_logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); + logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); Answer ans = _agentMgr.send(host.getId(), cmd); handleDestroyBridgeAnswer(ans, host.getId(), nw.getId()); } catch (Exception e) { - 
s_logger.info("[ignored]" + logger.info("[ignored]" + "exception while removing host from networks: " + e.getLocalizedMessage()); } } else { @@ -523,7 +521,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage int key = getGreKey(nw); String bridgeName = generateBridgeName(nw, key); Command cmd = new OvsDestroyBridgeCommand(nw.getId(), bridgeName, host.getId()); - s_logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); + logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); Answer ans = _agentMgr.send(host.getId(), cmd); handleDestroyBridgeAnswer(ans, host.getId(), nw.getId()); @@ -536,7 +534,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage if (p.getState().equals(OvsTunnel.State.Established.name())) { cmd = new OvsDestroyTunnelCommand(p.getNetworkId(), bridgeName, p.getPortName()); - s_logger.debug("Destroying tunnel to " + host.getId() + + logger.debug("Destroying tunnel to " + host.getId() + " from " + p.getFrom()); ans = _agentMgr.send(p.getFrom(), cmd); handleDestroyTunnelAnswer(ans, p.getFrom(), @@ -544,7 +542,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage } } } catch (Exception e) { - s_logger.warn("Destroy tunnel failed", e); + logger.warn("Destroy tunnel failed", e); } } } @@ -573,12 +571,12 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage // since this is the first VM from the VPC being launched on the host, first setup the bridge try { Commands cmds = new Commands(new OvsSetupBridgeCommand(bridgeName, hostId, null)); - s_logger.debug("Ask host " + hostId + " to create bridge for vpc " + vpcId + " and configure the " + logger.debug("Ask host " + hostId + " to create bridge for vpc " + vpcId + " and configure the " + " bridge for distributed routing."); Answer[] answers = _agentMgr.send(hostId, cmds); handleSetupBridgeAnswer(answers); } catch 
(OperationTimedoutException | AgentUnavailableException e) { - s_logger.warn("Ovs Tunnel network created tunnel failed", e); + logger.warn("Ovs Tunnel network created tunnel failed", e); } // now that bridge is setup, populate network acl's before the VM gets created @@ -586,7 +584,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage cmd.setSequenceNumber(getNextRoutingPolicyUpdateSequenceNumber(vpcId)); if (!sendVpcRoutingPolicyChangeUpdate(cmd, hostId, bridgeName)) { - s_logger.debug("Failed to send VPC routing policy change update to host : " + hostId + + logger.debug("Failed to send VPC routing policy change update to host : " + hostId + ". But moving on with sending the updates to the rest of the hosts."); } } @@ -610,7 +608,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage tunnelRecord = _tunnelNetworkDao.getByFromToNetwork(hostId, rh.longValue(), vpcNetwork.getId()); // Try and create the tunnel if does not exit or previous attempt failed if (tunnelRecord == null || tunnelRecord.getState().equals(OvsTunnel.State.Failed.name())) { - s_logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); + logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); if (tunnelRecord == null) { createTunnelRecord(hostId, rh.longValue(), vpcNetwork.getId(), key); } @@ -621,7 +619,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage tunnelRecord = _tunnelNetworkDao.getByFromToNetwork(rh.longValue(), hostId, vpcNetwork.getId()); // Try and create the tunnel if does not exit or previous attempt failed if (tunnelRecord == null || tunnelRecord.getState().equals(OvsTunnel.State.Failed.name())) { - s_logger.debug("Attempting to create tunnel from:" + rh.longValue() + " to:" + hostId); + logger.debug("Attempting to create tunnel from:" + rh.longValue() + " to:" + hostId); if (tunnelRecord == null) { 
createTunnelRecord(rh.longValue(), hostId, vpcNetwork.getId(), key); } @@ -647,9 +645,9 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage + "Failure is on host:" + rHost.getId()); Commands cmds = new Commands( new OvsCreateTunnelCommand(otherIp, key, Long.valueOf(hostId), i, vpcNetwork.getId(), myIp, bridgeName, vpcNetwork.getUuid())); - s_logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + vpcNetwork.getId()); - s_logger.debug("Ask host " + hostId + logger.debug("Ask host " + hostId + " to create gre tunnel to " + i); Answer[] answers = _agentMgr.send(hostId, cmds); handleCreateTunnelAnswer(answers); @@ -661,14 +659,14 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage Commands cmds = new Commands(new OvsCreateTunnelCommand(myIp, key, i, Long.valueOf(hostId), vpcNetwork.getId(), otherIp, bridgeName, vpcNetwork.getUuid())); - s_logger.debug("Ask host " + i + " to create gre tunnel to " + logger.debug("Ask host " + i + " to create gre tunnel to " + hostId); Answer[] answers = _agentMgr.send(i, cmds); handleCreateTunnelAnswer(answers); } } catch (GreTunnelException | OperationTimedoutException | AgentUnavailableException e) { // I really thing we should do a better handling of these exceptions - s_logger.warn("Ovs Tunnel network created tunnel failed", e); + logger.warn("Ovs Tunnel network created tunnel failed", e); } } } @@ -725,7 +723,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage // send topology change update to VPC spanned hosts for (Long id: vpcSpannedHostIds) { if (!sendVpcTopologyChangeUpdate(topologyConfigCommand, id, bridgeName)) { - s_logger.debug("Failed to send VPC topology change update to host : " + id + ". Moving on " + + logger.debug("Failed to send VPC topology change update to host : " + id + ". 
Moving on " + "with rest of the host update."); } } @@ -734,19 +732,19 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage public boolean sendVpcTopologyChangeUpdate(OvsVpcPhysicalTopologyConfigCommand updateCmd, long hostId, String bridgeName) { try { - s_logger.debug("Sending VPC topology change update to the host " + hostId); + logger.debug("Sending VPC topology change update to the host " + hostId); updateCmd.setHostId(hostId); updateCmd.setBridgeName(bridgeName); Answer ans = _agentMgr.send(hostId, updateCmd); if (ans.getResult()) { - s_logger.debug("Successfully updated the host " + hostId + " with latest VPC topology." ); + logger.debug("Successfully updated the host " + hostId + " with latest VPC topology." ); return true; } else { - s_logger.debug("Failed to update the host " + hostId + " with latest VPC topology." ); + logger.debug("Failed to update the host " + hostId + " with latest VPC topology." ); return false; } } catch (Exception e) { - s_logger.debug("Failed to updated the host " + hostId + " with latest VPC topology.", e ); + logger.debug("Failed to updated the host " + hostId + " with latest VPC topology.", e ); return false; } } @@ -770,7 +768,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage try { remoteIp = getGreEndpointIP(hostDetails, network); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error getting GRE endpoint: " + e.getLocalizedMessage()); } } @@ -838,13 +836,13 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage List vpcSpannedHostIds = _ovsNetworkToplogyGuru.getVpcSpannedHosts(vpcId); for (Long id: vpcSpannedHostIds) { if (!sendVpcRoutingPolicyChangeUpdate(cmd, id, bridgeName)) { - s_logger.debug("Failed to send VPC routing policy change update to host : " + id + + logger.debug("Failed to send VPC routing policy change update to host : " + id + ". 
But moving on with sending the updates to the rest of the hosts."); } } } } catch (Exception e) { - s_logger.debug("Failed to send VPC routing policy change updates all hosts in vpc", e); + logger.debug("Failed to send VPC routing policy change updates all hosts in vpc", e); } } } @@ -895,19 +893,19 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage private boolean sendVpcRoutingPolicyChangeUpdate(OvsVpcRoutingPolicyConfigCommand updateCmd, long hostId, String bridgeName) { try { - s_logger.debug("Sending VPC routing policies change update to the host " + hostId); + logger.debug("Sending VPC routing policies change update to the host " + hostId); updateCmd.setHostId(hostId); updateCmd.setBridgeName(bridgeName); Answer ans = _agentMgr.send(hostId, updateCmd); if (ans.getResult()) { - s_logger.debug("Successfully updated the host " + hostId + " with latest VPC routing policies." ); + logger.debug("Successfully updated the host " + hostId + " with latest VPC routing policies." ); return true; } else { - s_logger.debug("Failed to update the host " + hostId + " with latest routing policies." ); + logger.debug("Failed to update the host " + hostId + " with latest routing policies." 
); return false; } } catch (Exception e) { - s_logger.debug("Failed to updated the host " + hostId + " with latest routing policies due to" , e ); + logger.debug("Failed to updated the host " + hostId + " with latest routing policies due to" , e ); return false; } } diff --git a/plugins/network-elements/ovs/src/com/cloud/network/ovs/dao/VpcDistributedRouterSeqNoDaoImpl.java b/plugins/network-elements/ovs/src/com/cloud/network/ovs/dao/VpcDistributedRouterSeqNoDaoImpl.java index 88fa9c1da4e..cdd031e7834 100644 --- a/plugins/network-elements/ovs/src/com/cloud/network/ovs/dao/VpcDistributedRouterSeqNoDaoImpl.java +++ b/plugins/network-elements/ovs/src/com/cloud/network/ovs/dao/VpcDistributedRouterSeqNoDaoImpl.java @@ -18,7 +18,6 @@ package com.cloud.network.ovs.dao; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -28,7 +27,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {VpcDistributedRouterSeqNoDao.class}) public class VpcDistributedRouterSeqNoDaoImpl extends GenericDaoBase implements VpcDistributedRouterSeqNoDao { - protected static final Logger s_logger = Logger.getLogger(VpcDistributedRouterSeqNoDaoImpl.class); private SearchBuilder VpcIdSearch; protected VpcDistributedRouterSeqNoDaoImpl() { diff --git a/plugins/network-elements/palo-alto/src/com/cloud/network/element/PaloAltoExternalFirewallElement.java b/plugins/network-elements/palo-alto/src/com/cloud/network/element/PaloAltoExternalFirewallElement.java index cfb64b8bde4..1ed94b08095 100644 --- a/plugins/network-elements/palo-alto/src/com/cloud/network/element/PaloAltoExternalFirewallElement.java +++ b/plugins/network-elements/palo-alto/src/com/cloud/network/element/PaloAltoExternalFirewallElement.java @@ -25,7 +25,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; @@ -87,7 +86,6 @@ import com.cloud.vm.VirtualMachineProfile; public class PaloAltoExternalFirewallElement extends ExternalFirewallDeviceManagerImpl implements SourceNatServiceProvider, FirewallServiceProvider, PortForwardingServiceProvider, IpDeployer, PaloAltoFirewallElementService, StaticNatServiceProvider { - private static final Logger s_logger = Logger.getLogger(PaloAltoExternalFirewallElement.class); private static final Map> capabilities = setCapabilities(); @@ -123,18 +121,18 @@ public class PaloAltoExternalFirewallElement extends ExternalFirewallDeviceManag private boolean canHandle(Network network, Service service) { DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); if (zone.getNetworkType() == NetworkType.Advanced && network.getGuestType() != Network.GuestType.Isolated) { - s_logger.trace("Element " + getProvider().getName() + "is not handling network type = " + network.getGuestType()); + logger.trace("Element " + getProvider().getName() + "is not handling network type = " + network.getGuestType()); return false; } if (service == null) { if (!_networkManager.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); + logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); return false; } } else { if (!_networkManager.isProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } } @@ -149,7 +147,7 @@ public class 
PaloAltoExternalFirewallElement extends ExternalFirewallDeviceManag // don't have to implement network is Basic zone if (zone.getNetworkType() == NetworkType.Basic) { - s_logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); + logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); return false; } @@ -162,7 +160,7 @@ public class PaloAltoExternalFirewallElement extends ExternalFirewallDeviceManag } catch (InsufficientCapacityException capacityException) { // TODO: handle out of capacity exception in more gracefule manner when multiple providers are present for // the network - s_logger.error("Fail to implement the Palo Alto for network " + network, capacityException); + logger.error("Fail to implement the Palo Alto for network " + network, capacityException); return false; } } @@ -184,7 +182,7 @@ public class PaloAltoExternalFirewallElement extends ExternalFirewallDeviceManag // don't have to implement network is Basic zone if (zone.getNetworkType() == NetworkType.Basic) { - s_logger.debug("Not handling network shutdown in zone of type " + NetworkType.Basic); + logger.debug("Not handling network shutdown in zone of type " + NetworkType.Basic); return false; } @@ -432,7 +430,7 @@ public class PaloAltoExternalFirewallElement extends ExternalFirewallDeviceManag @Override public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.Firewall)) { - s_logger.warn("Palo Alto must be used as Firewall Service Provider in the network"); + logger.warn("Palo Alto must be used as Firewall Service Provider in the network"); return false; } return true; diff --git a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/dao/SspUuidDaoImpl.java b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/dao/SspUuidDaoImpl.java index d088e7fad0a..2d6fe715107 100644 --- 
a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/dao/SspUuidDaoImpl.java +++ b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/dao/SspUuidDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.network.Network; import com.cloud.utils.db.GenericDaoBase; @@ -32,7 +31,6 @@ import com.cloud.vm.NicProfile; @Local(SspUuidDao.class) public class SspUuidDaoImpl extends GenericDaoBase implements SspUuidDao { - private static final Logger s_logger = Logger.getLogger(SspUuidDaoImpl.class); protected final SearchBuilder native2uuid; protected final SearchBuilder uuid2native; diff --git a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/element/SspElement.java b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/element/SspElement.java index b41be40b4e9..f8f303c5e9d 100644 --- a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/element/SspElement.java +++ b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/element/SspElement.java @@ -30,7 +30,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.commands.AddSspCmd; import org.apache.cloudstack.api.commands.DeleteSspCmd; @@ -90,7 +89,6 @@ import com.cloud.vm.dao.NicDao; */ @Local(value = {NetworkElement.class, SspManager.class}) public class SspElement extends AdapterBase implements ConnectivityProvider, SspManager, SspService, NetworkMigrationResponder { - private static final Logger s_logger = Logger.getLogger(SspElement.class); public static final String s_SSP_NAME = "StratosphereSsp"; private static final Provider s_ssp_provider = new Provider(s_SSP_NAME, false); @@ -183,7 +181,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp if 
(fetchSspClients(physicalNetwork.getId(), physicalNetwork.getDataCenterId(), false).size() > 0) { return true; } - s_logger.warn("Ssp api endpoint not found. " + physicalNetwork.toString()); + logger.warn("Ssp api endpoint not found. " + physicalNetwork.toString()); return false; } @@ -197,9 +195,9 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp if (fetchSspClients(physicalNetwork.getId(), physicalNetwork.getDataCenterId(), true).size() > 0) { return true; } - s_logger.warn("enabled Ssp api endpoint not found. " + physicalNetwork.toString()); + logger.warn("enabled Ssp api endpoint not found. " + physicalNetwork.toString()); } else { - s_logger.warn("PhysicalNetwork is NULL."); + logger.warn("PhysicalNetwork is NULL."); } return false; } @@ -207,7 +205,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp private boolean canHandle(Network network) { if (canHandle(_physicalNetworkDao.findById(network.getPhysicalNetworkId()))) { if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), Service.Connectivity, getProvider())) { - s_logger.info("SSP is implicitly active for " + network); + logger.info("SSP is implicitly active for " + network); } return true; } @@ -234,7 +232,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp _sspCredentialDao.persist(credential); } else { if (cmd.getUsername() != null || cmd.getPassword() != null) { - s_logger.warn("Tenant credential already configured for zone:" + zoneId); + logger.warn("Tenant credential already configured for zone:" + zoneId); } } @@ -249,7 +247,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp _sspTenantDao.persist(tenant); } else { if (cmd.getTenantUuid() != null) { - s_logger.warn("Tenant uuid already configured for zone:" + zoneId); + logger.warn("Tenant uuid already configured for zone:" + zoneId); } } @@ -269,7 +267,7 @@ public class SspElement extends 
AdapterBase implements ConnectivityProvider, Ssp _hostDao.loadDetails(host); if ("v1Api".equals(host.getDetail("sspHost"))) { if (normalizedUrl.equals(host.getDetail("url"))) { - s_logger.warn("Ssp host already registered " + normalizedUrl); + logger.warn("Ssp host already registered " + normalizedUrl); return host; } } @@ -292,14 +290,14 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp @Override public boolean deleteSspHost(DeleteSspCmd cmd) { - s_logger.info("deleteStratosphereSsp"); + logger.info("deleteStratosphereSsp"); return _hostDao.remove(cmd.getHostId()); } @Override public boolean createNetwork(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) { if (_sspUuidDao.findUuidByNetwork(network) != null) { - s_logger.info("Network already has ssp TenantNetwork uuid :" + network.toString()); + logger.info("Network already has ssp TenantNetwork uuid :" + network.toString()); return true; } if (!canHandle(network)) { @@ -325,10 +323,10 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp processed = true; } if (processed) { - s_logger.error("Could not allocate an uuid for network " + network.toString()); + logger.error("Could not allocate an uuid for network " + network.toString()); return false; } else { - s_logger.error("Skipping #createNetwork() for " + network.toString()); + logger.error("Skipping #createNetwork() for " + network.toString()); return true; } } @@ -346,10 +344,10 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp } } if (!processed) { - s_logger.error("Ssp api tenant network deletion failed " + network.toString()); + logger.error("Ssp api tenant network deletion failed " + network.toString()); } } else { - s_logger.debug("Silently skipping #deleteNetwork() for " + network.toString()); + logger.debug("Silently skipping #deleteNetwork() for " + network.toString()); } return true; } @@ -359,7 +357,7 @@ 
public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp public boolean createNicEnv(Network network, NicProfile nic, DeployDestination dest, ReservationContext context) { String tenantNetworkUuid = _sspUuidDao.findUuidByNetwork(network); if (tenantNetworkUuid == null) { - s_logger.debug("Skipping #createNicEnv() for nic on " + network.toString()); + logger.debug("Skipping #createNicEnv() for nic on " + network.toString()); return true; } @@ -367,7 +365,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp List tenantPortUuidVos = _sspUuidDao.listUUidVoByNicProfile(nic); for (SspUuidVO tenantPortUuidVo : tenantPortUuidVos) { if (reservationId.equals(tenantPortUuidVo.getReservationId())) { - s_logger.info("Skipping because reservation found " + reservationId); + logger.info("Skipping because reservation found " + reservationId); return true; } } @@ -389,7 +387,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp } } if (tenantPortUuid == null) { - s_logger.debug("#createNicEnv() failed for nic on " + network.toString()); + logger.debug("#createNicEnv() failed for nic on " + network.toString()); return false; } @@ -403,14 +401,14 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp return true; } } - s_logger.error("Updating vif failed " + nic.toString()); + logger.error("Updating vif failed " + nic.toString()); return false; } @Override public boolean deleteNicEnv(Network network, NicProfile nic, ReservationContext context) { if (context == null) { - s_logger.error("ReservationContext was null for " + nic + " " + network); + logger.error("ReservationContext was null for " + nic + " " + network); return false; } String reservationId = context.getReservationId(); @@ -437,7 +435,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp } } if (!processed) { - s_logger.warn("Ssp api nic detach failed " + nic.toString()); + 
logger.warn("Ssp api nic detach failed " + nic.toString()); } processed = false; for (SspClient client : fetchSspClients(network.getPhysicalNetworkId(), network.getDataCenterId(), true)) { @@ -448,7 +446,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp } } if (!processed) { - s_logger.warn("Ssp api tenant port deletion failed " + nic.toString()); + logger.warn("Ssp api tenant port deletion failed " + nic.toString()); } _sspUuidDao.removeUuid(tenantPortUuid); } @@ -470,7 +468,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.info("implement"); + logger.info("implement"); return createNetwork(network, offering, dest, context); } @@ -483,7 +481,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp */ @Override public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.trace("shutdown"); + logger.trace("shutdown"); return deleteNetwork(network); } @@ -497,7 +495,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp @Override public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.trace("prepare"); + logger.trace("prepare"); return createNicEnv(network, nic, dest, context); } @@ -511,7 +509,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp @Override public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) throws 
ConcurrentOperationException, ResourceUnavailableException { - s_logger.trace("release"); + logger.trace("release"); return deleteNicEnv(network, nic, context); } @@ -523,7 +521,7 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp */ @Override public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.trace("destroy"); + logger.trace("destroy"); // nothing to do here. return true; } @@ -531,19 +529,19 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp @Override public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.trace("shutdownProviderInstances"); + logger.trace("shutdownProviderInstances"); return true; } @Override public boolean canEnableIndividualServices() { - s_logger.trace("canEnableIndividualServices"); + logger.trace("canEnableIndividualServices"); return true; // because there is only Connectivity } @Override public boolean verifyServicesCombination(Set services) { - s_logger.trace("verifyServicesCombination " + services.toString()); + logger.trace("verifyServicesCombination " + services.toString()); return true; } @@ -552,13 +550,13 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp try { prepare(network, nic, vm, dest, context); } catch (ConcurrentOperationException e) { - s_logger.error("prepareForMigration failed.", e); + logger.error("prepareForMigration failed.", e); return false; } catch (ResourceUnavailableException e) { - s_logger.error("prepareForMigration failed.", e); + logger.error("prepareForMigration failed.", e); return false; } catch (InsufficientCapacityException e) { - s_logger.error("prepareForMigration failed.", e); + logger.error("prepareForMigration failed.", e); return false; } return true; @@ -569,9 +567,9 @@ 
public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp try { release(network, nic, vm, dst); } catch (ConcurrentOperationException e) { - s_logger.error("rollbackMigration failed.", e); + logger.error("rollbackMigration failed.", e); } catch (ResourceUnavailableException e) { - s_logger.error("rollbackMigration failed.", e); + logger.error("rollbackMigration failed.", e); } } @@ -580,9 +578,9 @@ public class SspElement extends AdapterBase implements ConnectivityProvider, Ssp try { release(network, nic, vm, src); } catch (ConcurrentOperationException e) { - s_logger.error("commitMigration failed.", e); + logger.error("commitMigration failed.", e); } catch (ResourceUnavailableException e) { - s_logger.error("commitMigration failed.", e); + logger.error("commitMigration failed.", e); } } diff --git a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java index 7226873238b..3e0af508d13 100644 --- a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java +++ b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.network.guru; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.network.element.SspElement; import org.apache.cloudstack.network.element.SspManager; @@ -48,7 +47,6 @@ import com.cloud.vm.VirtualMachineProfile; */ @Local(value = NetworkGuru.class) public class SspGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigrationResponder { - private static final Logger s_logger = Logger.getLogger(SspGuestNetworkGuru.class); @Inject SspManager _sspMgr; @@ -64,7 +62,7 @@ public class SspGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr @Override protected 
boolean canHandle(NetworkOffering offering, NetworkType networkType, PhysicalNetwork physicalNetwork) { - s_logger.trace("canHandle"); + logger.trace("canHandle"); String setting = null; if (physicalNetwork != null && physicalNetwork.getIsolationMethods().contains("SSP")) { @@ -75,18 +73,18 @@ public class SspGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr } if (setting != null) { if (networkType != NetworkType.Advanced) { - s_logger.info("SSP enebled by " + setting + " but not active because networkType was " + networkType); + logger.info("SSP enebled by " + setting + " but not active because networkType was " + networkType); } else if (!isMyTrafficType(offering.getTrafficType())) { - s_logger.info("SSP enabled by " + setting + " but not active because traffic type not Guest"); + logger.info("SSP enabled by " + setting + " but not active because traffic type not Guest"); } else if (offering.getGuestType() != Network.GuestType.Isolated) { - s_logger.info("SSP works for network isolatation."); + logger.info("SSP works for network isolatation."); } else if (!_sspMgr.canHandle(physicalNetwork)) { - s_logger.info("SSP manager not ready"); + logger.info("SSP manager not ready"); } else { return true; } } else { - s_logger.debug("SSP not configured to be active"); + logger.debug("SSP not configured to be active"); } return false; } @@ -101,7 +99,7 @@ public class SspGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr @Override public Network implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException { - s_logger.trace("implement " + network.toString()); + logger.trace("implement " + network.toString()); super.implement(network, offering, dest, context); _sspMgr.createNetwork(network, offering, dest, context); return network; @@ -109,7 +107,7 @@ public class SspGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr @Override public void 
shutdown(NetworkProfile profile, NetworkOffering offering) { - s_logger.trace("shutdown " + profile.toString()); + logger.trace("shutdown " + profile.toString()); _sspMgr.deleteNetwork(profile); super.shutdown(profile, offering); } @@ -138,10 +136,10 @@ public class SspGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr try { reserve(nic, network, vm, dest, context); } catch (InsufficientVirtualNetworkCapacityException e) { - s_logger.error("prepareForMigration failed", e); + logger.error("prepareForMigration failed", e); return false; } catch (InsufficientAddressCapacityException e) { - s_logger.error("prepareForMigration failed", e); + logger.error("prepareForMigration failed", e); return false; } return true; diff --git a/plugins/network-elements/vxlan/src/com/cloud/network/guru/VxlanGuestNetworkGuru.java b/plugins/network-elements/vxlan/src/com/cloud/network/guru/VxlanGuestNetworkGuru.java index 4138b228e10..f891543151b 100644 --- a/plugins/network-elements/vxlan/src/com/cloud/network/guru/VxlanGuestNetworkGuru.java +++ b/plugins/network-elements/vxlan/src/com/cloud/network/guru/VxlanGuestNetworkGuru.java @@ -18,7 +18,6 @@ package com.cloud.network.guru; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.context.CallContext; @@ -49,7 +48,6 @@ import com.cloud.vm.VirtualMachineProfile; @Component @Local(value = NetworkGuru.class) public class VxlanGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(VxlanGuestNetworkGuru.class); public VxlanGuestNetworkGuru() { super(); @@ -63,7 +61,7 @@ public class VxlanGuestNetworkGuru extends GuestNetworkGuru { isMyIsolationMethod(physicalNetwork)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + 
GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -151,7 +149,7 @@ public class VxlanGuestNetworkGuru extends GuestNetworkGuru { public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vxlan || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } diff --git a/plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java index 83c23c2a536..04ee5c069ef 100644 --- a/plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java +++ b/plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -35,7 +34,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = StoragePoolAllocator.class) public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(RandomStoragePoolAllocator.class); @Override public List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { @@ -50,18 +48,18 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { return null; } - s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " 
cluster:" + clusterId); + logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); List pools = _storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER); if (pools.size() == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No storage pools available for allocation, returning"); + if (logger.isDebugEnabled()) { + logger.debug("No storage pools available for allocation, returning"); } return suitablePools; } Collections.shuffle(pools); - if (s_logger.isDebugEnabled()) { - s_logger.debug("RandomStoragePoolAllocator has " + pools.size() + " pools to check for allocation"); + if (logger.isDebugEnabled()) { + logger.debug("RandomStoragePoolAllocator has " + pools.size() + " pools to check for allocation"); } for (StoragePoolVO pool : pools) { if (suitablePools.size() == returnUpTo) { @@ -74,8 +72,8 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("RandomStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools"); + if (logger.isDebugEnabled()) { + logger.debug("RandomStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools"); } return suitablePools; diff --git a/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/util/ElastistorVolumeApiServiceImpl.java b/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/util/ElastistorVolumeApiServiceImpl.java index 709c1fe4265..83f7356eb44 100644 --- a/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/util/ElastistorVolumeApiServiceImpl.java +++ b/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/util/ElastistorVolumeApiServiceImpl.java @@ -26,7 +26,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import 
org.apache.cloudstack.api.response.ListResponse; @@ -47,7 +46,6 @@ import com.cloud.vm.dao.UserVmDao; @Component public class ElastistorVolumeApiServiceImpl extends ManagerBase implements ElastistorVolumeApiService { - private static final Logger s_logger = Logger.getLogger(ElastistorVolumeApiServiceImpl.class); @Inject protected VolumeDao _volsDao; @@ -74,7 +72,7 @@ public class ElastistorVolumeApiServiceImpl extends ManagerBase implements Elast cmdList.add(ListElastistorPoolCmd.class); cmdList.add(ListElastistorInterfaceCmd.class); - s_logger.info("Commands were registered successfully with elastistor volume api service. [cmdcount:" + cmdList.size() + "]"); + logger.info("Commands were registered successfully with elastistor volume api service. [cmdcount:" + cmdList.size() + "]"); return cmdList; } @@ -125,7 +123,7 @@ public class ElastistorVolumeApiServiceImpl extends ManagerBase implements Elast return response; } catch (Throwable e) { - s_logger.error("Unable to list elastistor volume.", e); + logger.error("Unable to list elastistor volume.", e); throw new CloudRuntimeException("Unable to list elastistor volume. " + e.getMessage()); } } @@ -165,7 +163,7 @@ public class ElastistorVolumeApiServiceImpl extends ManagerBase implements Elast return response; } catch (Throwable e) { - s_logger.error("Unable to list elastistor pools.", e); + logger.error("Unable to list elastistor pools.", e); throw new CloudRuntimeException("Unable to list elastistor pools. " + e.getMessage()); } @@ -199,7 +197,7 @@ public class ElastistorVolumeApiServiceImpl extends ManagerBase implements Elast return response; } catch (Throwable e) { - s_logger.error("Unable to list elastistor interfaces.", e); + logger.error("Unable to list elastistor interfaces.", e); throw new CloudRuntimeException("Unable to list elastistor interfaces. 
" + e.getMessage()); } diff --git a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapAuthenticator.java b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapAuthenticator.java index 8c6820f8458..fd7bcd0b8b4 100644 --- a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapAuthenticator.java +++ b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapAuthenticator.java @@ -21,13 +21,11 @@ import com.cloud.user.UserAccount; import com.cloud.user.dao.UserAccountDao; import com.cloud.utils.Pair; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.Map; public class LdapAuthenticator extends DefaultUserAuthenticator { - private static final Logger s_logger = Logger.getLogger(LdapAuthenticator.class.getName()); @Inject private LdapManager _ldapManager; @@ -48,14 +46,14 @@ public class LdapAuthenticator extends DefaultUserAuthenticator { public Pair authenticate(final String username, final String password, final Long domainId, final Map requestParameters) { if (StringUtils.isEmpty(username) || StringUtils.isEmpty(password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair(false, null); } final UserAccount user = _userAccountDao.getUserAccount(username, domainId); if (user == null) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + logger.debug("Unable to find user with " + username + " in domain " + domainId); return new Pair(false, null); } else if (_ldapManager.isLdapEnabled()) { boolean result = _ldapManager.canAuthenticate(username, password); diff --git a/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java b/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java index f08ec378b4e..f91146bce76 100644 --- 
a/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java +++ b/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java @@ -20,7 +20,6 @@ import com.cloud.user.dao.UserAccountDao; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import javax.ejb.Local; import javax.inject.Inject; @@ -36,30 +35,29 @@ import java.util.Map; */ @Local(value = {UserAuthenticator.class}) public class MD5UserAuthenticator extends DefaultUserAuthenticator { - public static final Logger s_logger = Logger.getLogger(MD5UserAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieving user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving user: " + username); } if (StringUtils.isEmpty(username) || StringUtils.isEmpty(password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair(false, null); } UserAccount user = _userAccountDao.getUserAccount(username, domainId); if (user == null) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + logger.debug("Unable to find user with " + username + " in domain " + domainId); return new Pair(false, null); } if (!user.getPassword().equals(encode(password))) { - s_logger.debug("Password does not match"); + logger.debug("Password does not match"); return new Pair(false, ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT); } return new Pair(true, null); diff --git a/plugins/user-authenticators/pbkdf2/src/org/apache/cloudstack/server/auth/PBKDF2UserAuthenticator.java 
b/plugins/user-authenticators/pbkdf2/src/org/apache/cloudstack/server/auth/PBKDF2UserAuthenticator.java index 130950d5186..735405e08a4 100644 --- a/plugins/user-authenticators/pbkdf2/src/org/apache/cloudstack/server/auth/PBKDF2UserAuthenticator.java +++ b/plugins/user-authenticators/pbkdf2/src/org/apache/cloudstack/server/auth/PBKDF2UserAuthenticator.java @@ -23,7 +23,6 @@ import com.cloud.utils.ConstantTimeComparator; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import org.bouncycastle.crypto.PBEParametersGenerator; import org.bouncycastle.crypto.generators.PKCS5S2ParametersGenerator; import org.bouncycastle.crypto.params.KeyParameter; @@ -41,7 +40,6 @@ import static java.lang.String.format; @Local({UserAuthenticator.class}) public class PBKDF2UserAuthenticator extends DefaultUserAuthenticator { - public static final Logger s_logger = Logger.getLogger(PBKDF2UserAuthenticator.class); private static final int s_saltlen = 64; private static final int s_rounds = 100000; private static final int s_keylen = 512; @@ -50,12 +48,12 @@ public class PBKDF2UserAuthenticator extends DefaultUserAuthenticator { private UserAccountDao _userAccountDao; public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieving user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving user: " + username); } if (StringUtils.isEmpty(username) || StringUtils.isEmpty(password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair(false, null); } @@ -64,7 +62,7 @@ public class PBKDF2UserAuthenticator extends DefaultUserAuthenticator { if (user != null) { isValidUser = true; } else { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + 
logger.debug("Unable to find user with " + username + " in domain " + domainId); } byte[] salt = new byte[0]; @@ -73,7 +71,7 @@ public class PBKDF2UserAuthenticator extends DefaultUserAuthenticator { if (isValidUser) { String[] storedPassword = user.getPassword().split(":"); if ((storedPassword.length != 3) || (!StringUtils.isNumeric(storedPassword[2]))) { - s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator"); + logger.warn("The stored password for " + username + " isn't in the right format for this authenticator"); isValidUser = false; } else { // Encoding format = :: @@ -112,7 +110,7 @@ public class PBKDF2UserAuthenticator extends DefaultUserAuthenticator { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable to hash password", e); } catch (InvalidKeySpecException e) { - s_logger.error("Exception in EncryptUtil.createKey ", e); + logger.error("Exception in EncryptUtil.createKey ", e); throw new CloudRuntimeException("Unable to hash password", e); } } diff --git a/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java b/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java index aaff27e58f3..099feb9fa15 100644 --- a/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java +++ b/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java @@ -19,7 +19,6 @@ import com.cloud.user.UserAccount; import com.cloud.user.dao.UserAccountDao; import com.cloud.utils.Pair; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import javax.ejb.Local; import javax.inject.Inject; @@ -27,30 +26,29 @@ import java.util.Map; @Local(value = {UserAuthenticator.class}) public class PlainTextUserAuthenticator extends DefaultUserAuthenticator { - public static final Logger s_logger = 
Logger.getLogger(PlainTextUserAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieving user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving user: " + username); } if (StringUtils.isEmpty(username) || StringUtils.isEmpty(password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair(false, null); } UserAccount user = _userAccountDao.getUserAccount(username, domainId); if (user == null) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + logger.debug("Unable to find user with " + username + " in domain " + domainId); return new Pair(false, null); } if (!user.getPassword().equals(password)) { - s_logger.debug("Password does not match"); + logger.debug("Password does not match"); return new Pair(false, ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT); } return new Pair(true, null); diff --git a/plugins/user-authenticators/saml2/src/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java b/plugins/user-authenticators/saml2/src/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java index a8740ac2709..455f28e6c55 100644 --- a/plugins/user-authenticators/saml2/src/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java +++ b/plugins/user-authenticators/saml2/src/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.framework.security.keystore.KeystoreDao; import org.apache.cloudstack.framework.security.keystore.KeystoreVO; import org.apache.commons.codec.binary.Base64; import org.apache.commons.httpclient.HttpClient; -import org.apache.log4j.Logger; import org.opensaml.DefaultBootstrap; import org.opensaml.common.xml.SAMLConstants; import org.opensaml.saml2.metadata.ContactPerson; @@ -93,7 
+92,6 @@ import java.util.TimerTask; @Component @Local(value = {SAML2AuthManager.class, PluggableAPIAuthenticator.class}) public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManager, Configurable { - private static final Logger s_logger = Logger.getLogger(SAML2AuthManagerImpl.class); private SAMLProviderMetadata _spMetadata = new SAMLProviderMetadata(); private Map _idpMetadataMap = new HashMap(); @@ -124,10 +122,10 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage @Override public boolean start() { if (isSAMLPluginEnabled()) { - s_logger.info("SAML auth plugin loaded"); + logger.info("SAML auth plugin loaded"); return setup(); } else { - s_logger.info("SAML auth plugin not enabled so not loading"); + logger.info("SAML auth plugin not enabled so not loading"); return super.start(); } } @@ -147,9 +145,9 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage KeyPair keyPair = SAMLUtils.generateRandomKeyPair(); _ksDao.save(SAMLPluginConstants.SAMLSP_KEYPAIR, SAMLUtils.savePrivateKey(keyPair.getPrivate()), SAMLUtils.savePublicKey(keyPair.getPublic()), "samlsp-keypair"); keyStoreVO = _ksDao.findByName(SAMLPluginConstants.SAMLSP_KEYPAIR); - s_logger.info("No SAML keystore found, created and saved a new Service Provider keypair"); + logger.info("No SAML keystore found, created and saved a new Service Provider keypair"); } catch (NoSuchProviderException | NoSuchAlgorithmException e) { - s_logger.error("Unable to create and save SAML keypair: " + e.toString()); + logger.error("Unable to create and save SAML keypair: " + e.toString()); } } @@ -178,7 +176,7 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage _ksDao.save(SAMLPluginConstants.SAMLSP_X509CERT, Base64.encodeBase64String(bos.toByteArray()), "", "samlsp-x509cert"); bos.close(); } catch (NoSuchAlgorithmException | NoSuchProviderException | CertificateEncodingException | SignatureException | 
InvalidKeyException | IOException e) { - s_logger.error("SAML Plugin won't be able to use X509 signed authentication"); + logger.error("SAML Plugin won't be able to use X509 signed authentication"); } } else { try { @@ -187,7 +185,7 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage spX509Key = (X509Certificate) si.readObject(); bi.close(); } catch (IOException | ClassNotFoundException ignored) { - s_logger.error("SAML Plugin won't be able to use X509 signed authentication. Failed to load X509 Certificate from Database."); + logger.error("SAML Plugin won't be able to use X509 signed authentication. Failed to load X509 Certificate from Database."); } } } @@ -214,7 +212,7 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage private void addIdpToMap(EntityDescriptor descriptor, Map idpMap) { SAMLProviderMetadata idpMetadata = new SAMLProviderMetadata(); idpMetadata.setEntityId(descriptor.getEntityID()); - s_logger.debug("Adding IdP to the list of discovered IdPs: " + descriptor.getEntityID()); + logger.debug("Adding IdP to the list of discovered IdPs: " + descriptor.getEntityID()); if (descriptor.getOrganization() != null) { if (descriptor.getOrganization().getDisplayNames() != null) { for (OrganizationDisplayName orgName : descriptor.getOrganization().getDisplayNames()) { @@ -288,21 +286,21 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage try { idpMetadata.setSigningCertificate(KeyInfoHelper.getCertificates(kd.getKeyInfo()).get(0)); } catch (CertificateException ignored) { - s_logger.info("[ignored] encountered invalid certificate signing.", ignored); + logger.info("[ignored] encountered invalid certificate signing.", ignored); } } if (kd.getUse() == UsageType.ENCRYPTION) { try { idpMetadata.setEncryptionCertificate(KeyInfoHelper.getCertificates(kd.getKeyInfo()).get(0)); } catch (CertificateException ignored) { - s_logger.info("[ignored] encountered invalid certificate 
encryption.", ignored); + logger.info("[ignored] encountered invalid certificate encryption.", ignored); } } if (kd.getUse() == UsageType.UNSPECIFIED) { try { unspecifiedKey = KeyInfoHelper.getCertificates(kd.getKeyInfo()).get(0); } catch (CertificateException ignored) { - s_logger.info("[ignored] encountered invalid certificate.", ignored); + logger.info("[ignored] encountered invalid certificate.", ignored); } } } @@ -314,7 +312,7 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage idpMetadata.setEncryptionCertificate(unspecifiedKey); } if (idpMap.containsKey(idpMetadata.getEntityId())) { - s_logger.warn("Duplicate IdP metadata found with entity Id: " + idpMetadata.getEntityId()); + logger.warn("Duplicate IdP metadata found with entity Id: " + idpMetadata.getEntityId()); } idpMap.put(idpMetadata.getEntityId(), idpMetadata); } @@ -345,16 +343,16 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage if (_idpMetaDataProvider == null) { return; } - s_logger.debug("Starting SAML IDP Metadata Refresh Task"); + logger.debug("Starting SAML IDP Metadata Refresh Task"); Map metadataMap = new HashMap(); try { discoverAndAddIdp(_idpMetaDataProvider.getMetadata(), metadataMap); _idpMetadataMap = metadataMap; expireTokens(); - s_logger.debug("Finished refreshing SAML Metadata and expiring old auth tokens"); + logger.debug("Finished refreshing SAML Metadata and expiring old auth tokens"); } catch (MetadataProviderException e) { - s_logger.warn("SAML Metadata Refresh task failed with exception: " + e.getMessage()); + logger.warn("SAML Metadata Refresh task failed with exception: " + e.getMessage()); } } @@ -362,7 +360,7 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage private boolean setup() { if (!initSP()) { - s_logger.error("SAML Plugin failed to initialize, please fix the configuration and restart management server"); + logger.error("SAML Plugin failed to initialize, please fix 
the configuration and restart management server"); return false; } _timer = new Timer(); @@ -378,11 +376,11 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage } else { File metadataFile = PropertiesUtil.findConfigFile(idpMetaDataUrl); if (metadataFile == null) { - s_logger.error("Provided Metadata is not a URL, Unable to locate metadata file from local path: " + idpMetaDataUrl); + logger.error("Provided Metadata is not a URL, Unable to locate metadata file from local path: " + idpMetaDataUrl); return false; } else{ - s_logger.debug("Provided Metadata is not a URL, trying to read metadata file from local path: " + metadataFile.getAbsolutePath()); + logger.debug("Provided Metadata is not a URL, trying to read metadata file from local path: " + metadataFile.getAbsolutePath()); _idpMetaDataProvider = new FilesystemMetadataProvider(_timer, metadataFile); } } @@ -392,14 +390,14 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage _timer.scheduleAtFixedRate(new MetadataRefreshTask(), 0, _refreshInterval * 1000); } catch (MetadataProviderException e) { - s_logger.error("Unable to read SAML2 IDP MetaData URL, error:" + e.getMessage()); - s_logger.error("SAML2 Authentication may be unavailable"); + logger.error("Unable to read SAML2 IDP MetaData URL, error:" + e.getMessage()); + logger.error("SAML2 Authentication may be unavailable"); return false; } catch (ConfigurationException | FactoryConfigurationError e) { - s_logger.error("OpenSAML bootstrapping failed: error: " + e.getMessage()); + logger.error("OpenSAML bootstrapping failed: error: " + e.getMessage()); return false; } catch (NullPointerException e) { - s_logger.error("Unable to setup SAML Auth Plugin due to NullPointerException" + + logger.error("Unable to setup SAML Auth Plugin due to NullPointerException" + " please check the SAML global settings: " + e.getMessage()); return false; } @@ -477,7 +475,7 @@ public class SAML2AuthManagerImpl extends 
AdapterBase implements SAML2AuthManage if (_samlTokenDao.findByUuid(authnId) == null) { _samlTokenDao.persist(token); } else { - s_logger.warn("Duplicate SAML token for entity=" + entity + " token id=" + authnId + " domain=" + domainPath); + logger.warn("Duplicate SAML token for entity=" + entity + " token id=" + authnId + " domain=" + domainPath); } } diff --git a/plugins/user-authenticators/saml2/src/org/apache/cloudstack/saml/SAML2UserAuthenticator.java b/plugins/user-authenticators/saml2/src/org/apache/cloudstack/saml/SAML2UserAuthenticator.java index 65a7959d997..fd507ae8197 100644 --- a/plugins/user-authenticators/saml2/src/org/apache/cloudstack/saml/SAML2UserAuthenticator.java +++ b/plugins/user-authenticators/saml2/src/org/apache/cloudstack/saml/SAML2UserAuthenticator.java @@ -22,7 +22,6 @@ import com.cloud.user.dao.UserAccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.Pair; import org.apache.cxf.common.util.StringUtils; -import org.apache.log4j.Logger; import javax.ejb.Local; import javax.inject.Inject; @@ -30,7 +29,6 @@ import java.util.Map; @Local(value = {UserAuthenticator.class}) public class SAML2UserAuthenticator extends DefaultUserAuthenticator { - public static final Logger s_logger = Logger.getLogger(SAML2UserAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @@ -39,18 +37,18 @@ public class SAML2UserAuthenticator extends DefaultUserAuthenticator { @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying SAML2 auth for user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Trying SAML2 auth for user: " + username); } if (StringUtils.isEmpty(username) || StringUtils.isEmpty(password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair(false, null); } final UserAccount userAccount = 
_userAccountDao.getUserAccount(username, domainId); if (userAccount == null || userAccount.getSource() != User.Source.SAML2) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId + ", or user source is not SAML2"); + logger.debug("Unable to find user with " + username + " in domain " + domainId + ", or user source is not SAML2"); return new Pair(false, null); } else { User user = _userDao.getUser(userAccount.getId()); diff --git a/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java b/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java index e35c29d3e25..d23679fecfc 100644 --- a/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java +++ b/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java @@ -21,7 +21,6 @@ import com.cloud.user.dao.UserAccountDao; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import org.bouncycastle.util.encoders.Base64; import javax.ejb.Local; @@ -34,7 +33,6 @@ import java.util.Map; @Local(value = {UserAuthenticator.class}) public class SHA256SaltedUserAuthenticator extends DefaultUserAuthenticator { - public static final Logger s_logger = Logger.getLogger(SHA256SaltedUserAuthenticator.class); private static final String s_defaultPassword = "000000000000000000000000000="; private static final String s_defaultSalt = "0000000000000000000000000000000="; @Inject @@ -46,19 +44,19 @@ public class SHA256SaltedUserAuthenticator extends DefaultUserAuthenticator { */ @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieving user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving 
user: " + username); } if (StringUtils.isEmpty(username) || StringUtils.isEmpty(password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair(false, null); } boolean realUser = true; UserAccount user = _userAccountDao.getUserAccount(username, domainId); if (user == null) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + logger.debug("Unable to find user with " + username + " in domain " + domainId); realUser = false; } /* Fake Data */ @@ -67,7 +65,7 @@ public class SHA256SaltedUserAuthenticator extends DefaultUserAuthenticator { if (realUser) { String storedPassword[] = user.getPassword().split(":"); if (storedPassword.length != 2) { - s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator"); + logger.warn("The stored password for " + username + " isn't in the right format for this authenticator"); realUser = false; } else { realPassword = storedPassword[1]; diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index 3a2b280c632..9fc0f164574 100644 --- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.manager.allocator.HostAllocator; @@ -69,7 +68,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Component @Local(value = {HostAllocator.class}) public class FirstFitAllocator extends AdapterBase implements HostAllocator { - private static final Logger s_logger = Logger.getLogger(FirstFitAllocator.class); @Inject protected HostDao _hostDao = 
null; @Inject @@ -119,8 +117,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { return new ArrayList(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); } String hostTagOnOffering = offering.getHostTag(); @@ -141,29 +139,29 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { List hostsMatchingOfferingTag = new ArrayList(); List hostsMatchingTemplateTag = new ArrayList(); if (hasSvcOfferingTag) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); } hostsMatchingOfferingTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsMatchingOfferingTag); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsMatchingOfferingTag); } } if (hasTemplateTag) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); } hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagOnTemplate + "' are:" + hostsMatchingTemplateTag); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagOnTemplate + "' are:" + hostsMatchingTemplateTag); } } if (hasSvcOfferingTag && hasTemplateTag) { 
hostsMatchingOfferingTag.retainAll(hostsMatchingTemplateTag); clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + hostsMatchingOfferingTag.size() + " Hosts satisfying both tags, host ids are:" + hostsMatchingOfferingTag); + if (logger.isDebugEnabled()) { + logger.debug("Found " + hostsMatchingOfferingTag.size() + " Hosts satisfying both tags, host ids are:" + hostsMatchingOfferingTag); } clusterHosts = hostsMatchingOfferingTag; @@ -218,25 +216,25 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { hostsCopy.retainAll(_resourceMgr.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId)); } else { if (hasSvcOfferingTag) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); } hostsCopy.retainAll(_hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsCopy); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsCopy); } } if (hasTemplateTag) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); } hostsCopy.retainAll(_hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagOnTemplate + "' are:" + hostsCopy); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagOnTemplate + "' are:" + hostsCopy); } } } @@ -260,20 +258,20 @@ public class 
FirstFitAllocator extends AdapterBase implements HostAllocator { hosts = reorderHostsByCapacity(plan, hosts); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("FirstFitAllocator has " + hosts.size() + " hosts to check for allocation: " + hosts); + if (logger.isDebugEnabled()) { + logger.debug("FirstFitAllocator has " + hosts.size() + " hosts to check for allocation: " + hosts); } // We will try to reorder the host lists such that we give priority to hosts that have // the minimums to support a VM's requirements hosts = prioritizeHosts(template, offering, hosts); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + hosts.size() + " hosts for allocation after prioritization: " + hosts); + if (logger.isDebugEnabled()) { + logger.debug("Found " + hosts.size() + " hosts for allocation after prioritization: " + hosts); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize()); + if (logger.isDebugEnabled()) { + logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize()); } long serviceOfferingId = offering.getId(); @@ -285,16 +283,16 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { break; } if (avoid.shouldAvoid(host)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); } continue; } //find number of guest VMs occupying capacity on this host. 
if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); } continue; @@ -304,7 +302,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.vgpuType.toString())) != null) { ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.pciDevice.toString()); if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - s_logger.info("Host name: " + host.getName() + ", hostId: "+ host.getId() +" does not have required GPU devices available"); + logger.info("Host name: " + host.getName() + ", hostId: "+ host.getId() +" does not have required GPU devices available"); continue; } } @@ -322,20 +320,20 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { considerReservedCapacity); if (hostHasCpuCapability && hostHasCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a suitable host, adding to list: " + host.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found a suitable host, adding to list: " + host.getId()); } suitableHosts.add(host); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + hostHasCpuCapability + ", host has capacity?" + hostHasCapacity); + if (logger.isDebugEnabled()) { + logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + hostHasCpuCapability + ", host has capacity?" 
+ hostHasCapacity); } avoid.addHost(host.getId()); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host Allocator returning " + suitableHosts.size() + " suitable hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Host Allocator returning " + suitableHosts.size() + " suitable hosts"); } return suitableHosts; @@ -351,8 +349,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { capacityType = CapacityVO.CAPACITY_TYPE_MEMORY; } List hostIdsByFreeCapacity = _capacityDao.orderHostsByFreeCapacity(clusterId, capacityType); - if (s_logger.isDebugEnabled()) { - s_logger.debug("List of hosts in descending order of free capacity in the cluster: "+ hostIdsByFreeCapacity); + if (logger.isDebugEnabled()) { + logger.debug("List of hosts in descending order of free capacity in the cluster: "+ hostIdsByFreeCapacity); } //now filter the given list of Hosts by this ordered list @@ -381,8 +379,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { Long clusterId = plan.getClusterId(); List hostIdsByVmCount = _vmInstanceDao.listHostIdsByVmCount(dcId, podId, clusterId, account.getAccountId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("List of hosts in ascending order of number of VMs: " + hostIdsByVmCount); + if (logger.isDebugEnabled()) { + logger.debug("List of hosts in ascending order of number of VMs: " + hostIdsByVmCount); } //now filter the given list of Hosts by this ordered list @@ -434,9 +432,9 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { hostsToCheck.addAll(hosts); } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (noHvmHosts.size() > 0) { - s_logger.debug("Not considering hosts: " + noHvmHosts + " to deploy template: " + template + " as they are not HVM enabled"); + logger.debug("Not considering hosts: " + noHvmHosts + " to deploy template: " + template + " as they are not HVM enabled"); } } // If a host is tagged with the same guest OS 
category as the template, move it to a high priority list diff --git a/server/src/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java index 161c9658be0..c2d1abde7df 100644 --- a/server/src/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java @@ -27,7 +27,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -58,7 +57,6 @@ import com.cloud.vm.VirtualMachineProfile; @Component @Local(value = HostAllocator.class) public class RecreateHostAllocator extends FirstFitRoutingAllocator { - private final static Logger s_logger = Logger.getLogger(RecreateHostAllocator.class); @Inject HostPodDao _podDao; @@ -83,10 +81,10 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { return hosts; } - s_logger.debug("First fit was unable to find a host"); + logger.debug("First fit was unable to find a host"); VirtualMachine.Type vmType = vm.getType(); if (vmType == VirtualMachine.Type.User) { - s_logger.debug("vm is not a system vm so let's just return empty list"); + logger.debug("vm is not a system vm so let's just return empty list"); return new ArrayList(); } @@ -95,11 +93,11 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { //getting rid of direct.attached.untagged.vlan.enabled config param: Bug 7204 //basic network type for zone maps to direct untagged case if (dc.getNetworkType().equals(NetworkType.Basic)) { - s_logger.debug("Direct Networking mode so we can only allow the host to be allocated in the same pod due to public ip address cannot change"); + logger.debug("Direct Networking mode so we can only allow the host to be allocated in the same pod due 
to public ip address cannot change"); List vols = _volsDao.findByInstance(vm.getId()); VolumeVO vol = vols.get(0); long podId = vol.getPodId(); - s_logger.debug("Pod id determined from volume " + vol.getId() + " is " + podId); + logger.debug("Pod id determined from volume " + vol.getId() + " is " + podId); Iterator it = pcs.iterator(); while (it.hasNext()) { PodCluster pc = it.next(); @@ -120,22 +118,22 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { } for (Pair pcId : avoidPcs) { - s_logger.debug("Removing " + pcId + " from the list of available pods"); + logger.debug("Removing " + pcId + " from the list of available pods"); pcs.remove(new PodCluster(new HostPodVO(pcId.first()), pcId.second() != null ? new ClusterVO(pcId.second()) : null)); } for (PodCluster p : pcs) { if (p.getPod().getAllocationState() != Grouping.AllocationState.Enabled) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Pod name: " + p.getPod().getName() + ", podId: " + p.getPod().getId() + " is in " + p.getPod().getAllocationState().name() + + if (logger.isDebugEnabled()) { + logger.debug("Pod name: " + p.getPod().getName() + ", podId: " + p.getPod().getId() + " is in " + p.getPod().getAllocationState().name() + " state, skipping this and trying other pods"); } continue; } Long clusterId = p.getCluster() == null ? 
null : p.getCluster().getId(); if (p.getCluster() != null && p.getCluster().getAllocationState() != Grouping.AllocationState.Enabled) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster name: " + p.getCluster().getName() + ", clusterId: " + clusterId + " is in " + p.getCluster().getAllocationState().name() + + if (logger.isDebugEnabled()) { + logger.debug("Cluster name: " + p.getCluster().getName() + ", clusterId: " + clusterId + " is in " + p.getCluster().getAllocationState().name() + " state, skipping this and trying other pod-clusters"); } continue; @@ -148,7 +146,7 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { } - s_logger.debug("Unable to find any available pods at all!"); + logger.debug("Unable to find any available pods at all!"); return new ArrayList(); } diff --git a/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java index 885032e1761..0f4b47068e1 100644 --- a/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java @@ -27,7 +27,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -60,7 +59,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = PodAllocator.class) public class UserConcentratedAllocator extends AdapterBase implements PodAllocator { - private final static Logger s_logger = Logger.getLogger(UserConcentratedAllocator.class); @Inject UserVmDao _vmDao; @@ -89,7 +87,7 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat List podsInZone = _podDao.listByDataCenterId(zoneId); if (podsInZone.size() == 0) { - s_logger.debug("No pods found in zone " + zone.getName()); + logger.debug("No pods found in zone " + 
zone.getName()); return null; } @@ -112,8 +110,8 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat dataCenterAndPodHasEnoughCapacity(zoneId, podId, (offering.getRamSize()) * 1024L * 1024L, Capacity.CAPACITY_TYPE_MEMORY, hostCandiates); if (!enoughCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not enough RAM available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Not enough RAM available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); } continue; } @@ -122,8 +120,8 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat enoughCapacity = dataCenterAndPodHasEnoughCapacity(zoneId, podId, ((long)offering.getCpu() * offering.getSpeed()), Capacity.CAPACITY_TYPE_CPU, hostCandiates); if (!enoughCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not enough cpu available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Not enough cpu available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); } continue; } @@ -147,13 +145,13 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat } if (availablePods.size() == 0) { - s_logger.debug("There are no pods with enough memory/CPU capacity in zone " + zone.getName()); + logger.debug("There are no pods with enough memory/CPU capacity in zone " + zone.getName()); return null; } else { // Return a random pod int next = _rand.nextInt(availablePods.size()); HostPodVO selectedPod = availablePods.get(next); - s_logger.debug("Found pod " + selectedPod.getName() + " in zone " + zone.getName()); + logger.debug("Found pod " + selectedPod.getName() + " in zone " + zone.getName()); return new Pair(selectedPod, podHostCandidates.get(selectedPod.getId())); } } @@ -165,9 
+163,9 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType); sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId); sc.addAnd("podId", SearchCriteria.Op.EQ, podId); - s_logger.trace("Executing search"); + logger.trace("Executing search"); capacities = _capacityDao.search(sc, null); - s_logger.trace("Done with a search"); + logger.trace("Done with a search"); boolean enoughCapacity = false; if (capacities != null) { @@ -196,8 +194,8 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat private boolean skipCalculation(VMInstanceVO vm) { if (vm.getState() == State.Expunging) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skip counting capacity for Expunging VM : " + vm.getInstanceName()); + if (logger.isDebugEnabled()) { + logger.debug("Skip counting capacity for Expunging VM : " + vm.getInstanceName()); } return true; } @@ -217,8 +215,8 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat long millisecondsSinceLastUpdate = DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime(); if (millisecondsSinceLastUpdate > secondsToSkipVMs * 1000L) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skip counting " + vm.getState().toString() + " vm " + vm.getInstanceName() + " in capacity allocation as it has been " + + if (logger.isDebugEnabled()) { + logger.debug("Skip counting " + vm.getState().toString() + " vm " + vm.getInstanceName() + " in capacity allocation as it has been " + vm.getState().toString().toLowerCase() + " for " + millisecondsSinceLastUpdate / 60000 + " minutes"); } return true; @@ -262,15 +260,15 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat if (capacityType == Capacity.CAPACITY_TYPE_MEMORY) { usedCapacity += so.getRamSize() * 1024L * 1024L; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Counting memory capacity used by vm: " + 
vm.getId() + ", size: " + so.getRamSize() + "MB, host: " + hostId + ", currently counted: " + + if (logger.isDebugEnabled()) { + logger.debug("Counting memory capacity used by vm: " + vm.getId() + ", size: " + so.getRamSize() + "MB, host: " + hostId + ", currently counted: " + usedCapacity + " Bytes"); } } else if (capacityType == Capacity.CAPACITY_TYPE_CPU) { usedCapacity += so.getCpu() * so.getSpeed(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Counting cpu capacity used by vm: " + vm.getId() + ", cpu: " + so.getCpu() + ", speed: " + so.getSpeed() + ", currently counted: " + + if (logger.isDebugEnabled()) { + logger.debug("Counting cpu capacity used by vm: " + vm.getId() + ", cpu: " + so.getCpu() + ", speed: " + so.getSpeed() + ", currently counted: " + usedCapacity + " Bytes"); } } @@ -287,9 +285,9 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat * List tpvoList = _templatePoolDao.listByTemplateStatus(templateId, dcId, podId, * Status.DOWNLOADED); * - * if (thvoList != null && thvoList.size() > 0) { if (s_logger.isDebugEnabled()) { s_logger.debug("Found " + + * if (thvoList != null && thvoList.size() > 0) { if (logger.isDebugEnabled()) { logger.debug("Found " + * thvoList.size() + " storage hosts in pod " + podId + " with template " + templateId); } return true; } else if - * (tpvoList != null && tpvoList.size() > 0) { if (s_logger.isDebugEnabled()) { s_logger.debug("Found " + + * (tpvoList != null && tpvoList.size() > 0) { if (logger.isDebugEnabled()) { logger.debug("Found " + * tpvoList.size() + " storage pools in pod " + podId + " with template " + templateId); } return true; }else { return * false; } */ diff --git a/server/src/com/cloud/agent/manager/authn/impl/BasicAgentAuthManager.java b/server/src/com/cloud/agent/manager/authn/impl/BasicAgentAuthManager.java index 1aa8f9168f0..24d3dfd5515 100644 --- a/server/src/com/cloud/agent/manager/authn/impl/BasicAgentAuthManager.java +++ 
b/server/src/com/cloud/agent/manager/authn/impl/BasicAgentAuthManager.java @@ -22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -39,7 +38,6 @@ import com.cloud.utils.component.AdapterBase; @Component @Local(value = {AgentAuthorizer.class, StartupCommandProcessor.class}) public class BasicAgentAuthManager extends AdapterBase implements AgentAuthorizer, StartupCommandProcessor { - private static final Logger s_logger = Logger.getLogger(BasicAgentAuthManager.class); @Inject HostDao _hostDao = null; @Inject @@ -54,7 +52,7 @@ public class BasicAgentAuthManager extends AdapterBase implements AgentAuthorize } catch (AgentAuthnException e) { throw new ConnectionException(true, "Failed to authenticate/authorize", e); } - s_logger.debug("Authorized agent with guid " + cmd[0].getGuid()); + logger.debug("Authorized agent with guid " + cmd[0].getGuid()); return false;//so that the next host creator can process it } diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java index 86530699675..41a9dffa836 100644 --- a/server/src/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/com/cloud/alert/AlertManagerImpl.java @@ -40,7 +40,6 @@ import javax.mail.URLName; import javax.mail.internet.InternetAddress; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.sun.mail.smtp.SMTPMessage; import com.sun.mail.smtp.SMTPSSLTransport; @@ -89,8 +88,6 @@ import com.cloud.utils.db.SearchCriteria; @Local(value = {AlertManager.class}) public class AlertManagerImpl extends ManagerBase implements AlertManager, Configurable { - private static final Logger s_logger = Logger.getLogger(AlertManagerImpl.class.getName()); - private static final Logger s_alertsLogger = 
Logger.getLogger("org.apache.cloudstack.alerts"); private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds @@ -235,7 +232,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi _emailAlert.clearAlert(alertType.getType(), dataCenterId, podId); } } catch (Exception ex) { - s_logger.error("Problem clearing email alert", ex); + logger.error("Problem clearing email alert", ex); } } @@ -251,11 +248,11 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi if (_emailAlert != null) { _emailAlert.sendAlert(alertType, dataCenterId, podId, null, subject, body); } else { - s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + podId + + logger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + podId + " // message:: " + subject + " // body:: " + body); } } catch (Exception ex) { - s_logger.error("Problem sending email alert", ex); + logger.error("Problem sending email alert", ex); } } @@ -270,9 +267,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("recalculating system capacity"); - s_logger.debug("Executing cpu/ram capacity update"); + if (logger.isDebugEnabled()) { + logger.debug("recalculating system capacity"); + logger.debug("Executing cpu/ram capacity update"); } // Calculate CPU and RAM capacities @@ -283,9 +280,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi _capacityMgr.updateCapacityForHost(host); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done executing cpu/ram capacity update"); - s_logger.debug("Executing storage capacity update"); + if (logger.isDebugEnabled()) { + logger.debug("Done executing cpu/ram capacity update"); + logger.debug("Executing storage capacity update"); } // Calculate storage pool capacity List 
storagePools = _storagePoolDao.listAll(); @@ -298,9 +295,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done executing storage capacity update"); - s_logger.debug("Executing capacity updates for public ip and Vlans"); + if (logger.isDebugEnabled()) { + logger.debug("Done executing storage capacity update"); + logger.debug("Executing capacity updates for public ip and Vlans"); } List datacenters = _dcDao.listAll(); @@ -327,9 +324,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done capacity updates for public ip and Vlans"); - s_logger.debug("Executing capacity updates for private ip"); + if (logger.isDebugEnabled()) { + logger.debug("Done capacity updates for public ip and Vlans"); + logger.debug("Executing capacity updates for private ip"); } // Calculate new Private IP capacity @@ -341,13 +338,13 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi createOrUpdateIpCapacity(dcId, podId, Capacity.CAPACITY_TYPE_PRIVATE_IP, _configMgr.findPodAllocationState(pod)); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done executing capacity updates for private ip"); - s_logger.debug("Done recalculating system capacity"); + if (logger.isDebugEnabled()) { + logger.debug("Done executing capacity updates for private ip"); + logger.debug("Done recalculating system capacity"); } } catch (Throwable t) { - s_logger.error("Caught exception in recalculating capacity", t); + logger.error("Caught exception in recalculating capacity", t); } } @@ -420,11 +417,11 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi @Override protected void runInContext() { try { - s_logger.debug("Running Capacity Checker ... "); + logger.debug("Running Capacity Checker ... "); checkForAlerts(); - s_logger.debug("Done running Capacity Checker ... 
"); + logger.debug("Done running Capacity Checker ... "); } catch (Throwable t) { - s_logger.error("Exception in CapacityChecker", t); + logger.error("Exception in CapacityChecker", t); } } } @@ -632,13 +629,13 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi } try { - if (s_logger.isDebugEnabled()) { - s_logger.debug(msgSubject); - s_logger.debug(msgContent); + if (logger.isDebugEnabled()) { + logger.debug(msgSubject); + logger.debug(msgContent); } _emailAlert.sendAlert(alertType, dc.getId(), podId, clusterId, msgSubject, msgContent); } catch (Exception ex) { - s_logger.error("Exception in CapacityChecker", ex); + logger.error("Exception in CapacityChecker", ex); } } @@ -694,7 +691,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi try { _recipientList[i] = new InternetAddress(recipientList[i], recipientList[i]); } catch (Exception ex) { - s_logger.error("Exception creating address for: " + recipientList[i], ex); + logger.error("Exception creating address for: " + recipientList[i], ex); } } } @@ -749,7 +746,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi // TODO: make sure this handles SSL transport (useAuth is true) and regular public void sendAlert(AlertType alertType, long dataCenterId, Long podId, Long clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException { - s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + logger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + podId + " // clusterId:: " + clusterId + " // message:: " + subject); AlertVO alert = null; if ((alertType != AlertManager.AlertType.ALERT_TYPE_HOST) && @@ -776,8 +773,8 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi newAlert.setName(alertType.getName()); _alertDao.persist(newAlert); } else { - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Have already sent: " + alert.getSentCount() + " emails for alert type '" + alertType + "' -- skipping send email"); + if (logger.isDebugEnabled()) { + logger.debug("Have already sent: " + alert.getSentCount() + " emails for alert type '" + alertType + "' -- skipping send email"); } return; } @@ -813,9 +810,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi smtpTrans.sendMessage(msg, msg.getAllRecipients()); smtpTrans.close(); } catch (SendFailedException e) { - s_logger.error(" Failed to send email alert " + e); + logger.error(" Failed to send email alert " + e); } catch (MessagingException e) { - s_logger.error(" Failed to send email alert " + e); + logger.error(" Failed to send email alert " + e); } } }); @@ -859,7 +856,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi sendAlert(alertType, dataCenterId, podId, msg, msg); return true; } catch (Exception ex) { - s_logger.warn("Failed to generate an alert of type=" + alertType + "; msg=" + msg); + logger.warn("Failed to generate an alert of type=" + alertType + "; msg=" + msg); return false; } } diff --git a/server/src/com/cloud/alert/ClusterAlertAdapter.java b/server/src/com/cloud/alert/ClusterAlertAdapter.java index ccb10746c86..db1e5ce5828 100644 --- a/server/src/com/cloud/alert/ClusterAlertAdapter.java +++ b/server/src/com/cloud/alert/ClusterAlertAdapter.java @@ -22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.cluster.ClusterManager; @@ -38,7 +37,6 @@ import com.cloud.utils.events.SubscriptionMgr; @Local(value = AlertAdapter.class) public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { - private static final Logger s_logger = Logger.getLogger(ClusterAlertAdapter.class); @Inject private AlertManager _alertMgr; @@ 
-46,8 +44,8 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { private ManagementServerHostDao _mshostDao; public void onClusterAlert(Object sender, EventArgs args) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Receive cluster alert, EventArgs: " + args.getClass().getName()); + if (logger.isDebugEnabled()) { + logger.debug("Receive cluster alert, EventArgs: " + args.getClass().getName()); } if (args instanceof ClusterNodeJoinEventArgs) { @@ -55,21 +53,21 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { } else if (args instanceof ClusterNodeLeftEventArgs) { onClusterNodeLeft(sender, (ClusterNodeLeftEventArgs)args); } else { - s_logger.error("Unrecognized cluster alert event"); + logger.error("Unrecognized cluster alert event"); } } private void onClusterNodeJoined(Object sender, ClusterNodeJoinEventArgs args) { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { for (ManagementServerHostVO mshost : args.getJoinedNodes()) { - s_logger.debug("Handle cluster node join alert, joined node: " + mshost.getServiceIP() + ", msidL: " + mshost.getMsid()); + logger.debug("Handle cluster node join alert, joined node: " + mshost.getServiceIP() + ", msidL: " + mshost.getMsid()); } } for (ManagementServerHostVO mshost : args.getJoinedNodes()) { if (mshost.getId() == args.getSelf().longValue()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Management server node " + mshost.getServiceIP() + " is up, send alert"); + if (logger.isDebugEnabled()) { + logger.debug("Management server node " + mshost.getServiceIP() + " is up, send alert"); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGMENT_NODE, 0, new Long(0), "Management server node " + mshost.getServiceIP() + " is up", ""); @@ -80,23 +78,23 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { private void onClusterNodeLeft(Object sender, ClusterNodeLeftEventArgs args) { - if 
(s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { for (ManagementServerHostVO mshost : args.getLeftNodes()) { - s_logger.debug("Handle cluster node left alert, leaving node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Handle cluster node left alert, leaving node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); } } for (ManagementServerHostVO mshost : args.getLeftNodes()) { if (mshost.getId() != args.getSelf().longValue()) { if (_mshostDao.increaseAlertCount(mshost.getId()) > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, send alert"); + if (logger.isDebugEnabled()) { + logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, send alert"); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGMENT_NODE, 0, new Long(0), "Management server node " + mshost.getServiceIP() + " is down", ""); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, but alert has already been set"); + if (logger.isDebugEnabled()) { + logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, but alert has already been set"); } } } @@ -106,8 +104,8 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring cluster alert manager : " + name); + if (logger.isInfoEnabled()) { + logger.info("Start configuring cluster alert manager : " + name); } try { diff --git a/server/src/com/cloud/alert/ConsoleProxyAlertAdapter.java b/server/src/com/cloud/alert/ConsoleProxyAlertAdapter.java index ba1dca4a855..7962906b349 100644 --- a/server/src/com/cloud/alert/ConsoleProxyAlertAdapter.java +++ b/server/src/com/cloud/alert/ConsoleProxyAlertAdapter.java @@ 
-22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.consoleproxy.ConsoleProxyAlertEventArgs; @@ -39,7 +38,6 @@ import com.cloud.vm.dao.ConsoleProxyDao; @Local(value = AlertAdapter.class) public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapter { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyAlertAdapter.class); @Inject private AlertManager _alertMgr; @@ -49,8 +47,8 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte private ConsoleProxyDao _consoleProxyDao; public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { - if (s_logger.isDebugEnabled()) - s_logger.debug("received console proxy alert"); + if (logger.isDebugEnabled()) + logger.debug("received console proxy alert"); DataCenterVO dc = _dcDao.findById(args.getZoneId()); ConsoleProxyVO proxy = args.getProxy(); @@ -64,14 +62,14 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte switch (args.getType()) { case ConsoleProxyAlertEventArgs.PROXY_CREATED: - if (s_logger.isDebugEnabled()) - s_logger.debug("New console proxy created, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + if (logger.isDebugEnabled()) + logger.debug("New console proxy created, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + proxy.getPrivateIpAddress()); break; case ConsoleProxyAlertEventArgs.PROXY_UP: - if (s_logger.isDebugEnabled()) - s_logger.debug("Console proxy is up, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + if (logger.isDebugEnabled()) + logger.debug("Console proxy is up, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + 
proxy.getPublicIpAddress() + ", private IP: " + proxy.getPrivateIpAddress()); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxy.getPodIdToDeployIn(), @@ -81,8 +79,8 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte break; case ConsoleProxyAlertEventArgs.PROXY_DOWN: - if (s_logger.isDebugEnabled()) - s_logger.debug("Console proxy is down, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + if (logger.isDebugEnabled()) + logger.debug("Console proxy is down, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxy.getPodIdToDeployIn(), @@ -92,8 +90,8 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte break; case ConsoleProxyAlertEventArgs.PROXY_REBOOTED: - if (s_logger.isDebugEnabled()) - s_logger.debug("Console proxy is rebooted, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + if (logger.isDebugEnabled()) + logger.debug("Console proxy is rebooted, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + (proxy.getPrivateIpAddress() == null ? 
"N/A" : proxy.getPrivateIpAddress())); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxy.getPodIdToDeployIn(), @@ -103,16 +101,16 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte break; case ConsoleProxyAlertEventArgs.PROXY_CREATE_FAILURE: - if (s_logger.isDebugEnabled()) - s_logger.debug("Console proxy creation failure, zone: " + dc.getName()); + if (logger.isDebugEnabled()) + logger.debug("Console proxy creation failure, zone: " + dc.getName()); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), null, "Console proxy creation failure. zone: " + dc.getName() + ", error details: " + args.getMessage(), "Console proxy creation failure (zone " + dc.getName() + ")"); break; case ConsoleProxyAlertEventArgs.PROXY_START_FAILURE: - if (s_logger.isDebugEnabled()) - s_logger.debug("Console proxy startup failure, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Console proxy startup failure, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxy.getPodIdToDeployIn(), @@ -122,8 +120,8 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte break; case ConsoleProxyAlertEventArgs.PROXY_FIREWALL_ALERT: - if (s_logger.isDebugEnabled()) - s_logger.debug("Console proxy firewall alert, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Console proxy firewall alert, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + (proxy.getPrivateIpAddress() == null ? 
"N/A" : proxy.getPrivateIpAddress())); _alertMgr.sendAlert( @@ -136,8 +134,8 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte break; case ConsoleProxyAlertEventArgs.PROXY_STORAGE_ALERT: - if (s_logger.isDebugEnabled()) - s_logger.debug("Console proxy storage alert, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Console proxy storage alert, zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + proxy.getPrivateIpAddress() + ", message: " + args.getMessage()); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_STORAGE_MISC, args.getZoneId(), proxy.getPodIdToDeployIn(), @@ -149,8 +147,8 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) - s_logger.info("Start configuring console proxy alert manager : " + name); + if (logger.isInfoEnabled()) + logger.info("Start configuring console proxy alert manager : " + name); try { SubscriptionMgr.getInstance().subscribe(ConsoleProxyManager.ALERT_SUBJECT, this, "onProxyAlert"); diff --git a/server/src/com/cloud/alert/SecondaryStorageVmAlertAdapter.java b/server/src/com/cloud/alert/SecondaryStorageVmAlertAdapter.java index 392015b8f0e..eaf918fde89 100644 --- a/server/src/com/cloud/alert/SecondaryStorageVmAlertAdapter.java +++ b/server/src/com/cloud/alert/SecondaryStorageVmAlertAdapter.java @@ -22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterVO; @@ -39,7 +38,6 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; @Local(value = AlertAdapter.class) public class SecondaryStorageVmAlertAdapter extends AdapterBase implements 
AlertAdapter { - private static final Logger s_logger = Logger.getLogger(SecondaryStorageVmAlertAdapter.class); @Inject private AlertManager _alertMgr; @@ -49,8 +47,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert private SecondaryStorageVmDao _ssvmDao; public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { - if (s_logger.isDebugEnabled()) - s_logger.debug("received secondary storage vm alert"); + if (logger.isDebugEnabled()) + logger.debug("received secondary storage vm alert"); DataCenterVO dc = _dcDao.findById(args.getZoneId()); SecondaryStorageVmVO secStorageVm = args.getSecStorageVm(); @@ -63,14 +61,14 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert switch (args.getType()) { case SecStorageVmAlertEventArgs.SSVM_CREATED: - if (s_logger.isDebugEnabled()) - s_logger.debug("New secondary storage vm created, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("New secondary storage vm created, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " + secStorageVm.getPrivateIpAddress()); break; case SecStorageVmAlertEventArgs.SSVM_UP: - if (s_logger.isDebugEnabled()) - s_logger.debug("Secondary Storage Vm is up, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Secondary Storage Vm is up, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " + secStorageVm.getPrivateIpAddress()); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), secStorageVm.getPodIdToDeployIn(), "Secondary Storage Vm up in zone: " + @@ -79,8 +77,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements 
Alert break; case SecStorageVmAlertEventArgs.SSVM_DOWN: - if (s_logger.isDebugEnabled()) - s_logger.debug("Secondary Storage Vm is down, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Secondary Storage Vm is down, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress())); _alertMgr.sendAlert( @@ -93,8 +91,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert break; case SecStorageVmAlertEventArgs.SSVM_REBOOTED: - if (s_logger.isDebugEnabled()) - s_logger.debug("Secondary Storage Vm is rebooted, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Secondary Storage Vm is rebooted, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress())); _alertMgr.sendAlert( @@ -107,8 +105,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert break; case SecStorageVmAlertEventArgs.SSVM_CREATE_FAILURE: - if (s_logger.isDebugEnabled()) - s_logger.debug("Secondary Storage Vm creation failure, zone: " + dc.getName()); + if (logger.isDebugEnabled()) + logger.debug("Secondary Storage Vm creation failure, zone: " + dc.getName()); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), null, "Secondary Storage Vm creation failure. 
zone: " + dc.getName() + ", error details: " + args.getMessage(), @@ -116,8 +114,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert break; case SecStorageVmAlertEventArgs.SSVM_START_FAILURE: - if (s_logger.isDebugEnabled()) - s_logger.debug("Secondary Storage Vm startup failure, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Secondary Storage Vm startup failure, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress())); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), secStorageVm.getPodIdToDeployIn(), @@ -128,8 +126,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert break; case SecStorageVmAlertEventArgs.SSVM_FIREWALL_ALERT: - if (s_logger.isDebugEnabled()) - s_logger.debug("Secondary Storage Vm firewall alert, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Secondary Storage Vm firewall alert, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? 
"N/A" : secStorageVm.getPrivateIpAddress())); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), secStorageVm.getPodIdToDeployIn(), @@ -139,8 +137,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert break; case SecStorageVmAlertEventArgs.SSVM_STORAGE_ALERT: - if (s_logger.isDebugEnabled()) - s_logger.debug("Secondary Storage Vm storage alert, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + + if (logger.isDebugEnabled()) + logger.debug("Secondary Storage Vm storage alert, zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " + secStorageVm.getPrivateIpAddress() + ", message: " + args.getMessage()); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_STORAGE_MISC, args.getZoneId(), secStorageVm.getPodIdToDeployIn(), @@ -153,8 +151,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) - s_logger.info("Start configuring secondary storage vm alert manager : " + name); + if (logger.isInfoEnabled()) + logger.info("Start configuring secondary storage vm alert manager : " + name); try { SubscriptionMgr.getInstance().subscribe(SecondaryStorageVmManager.ALERT_SUBJECT, this, "onSSVMAlert"); diff --git a/server/src/com/cloud/api/ApiAsyncJobDispatcher.java b/server/src/com/cloud/api/ApiAsyncJobDispatcher.java index 0b7d681ad95..c925e1a36bf 100644 --- a/server/src/com/cloud/api/ApiAsyncJobDispatcher.java +++ b/server/src/com/cloud/api/ApiAsyncJobDispatcher.java @@ -21,7 +21,6 @@ import java.util.Map; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; @@ -45,7 +44,6 @@ import com.cloud.utils.component.ComponentContext; import 
com.cloud.utils.db.EntityManager; public class ApiAsyncJobDispatcher extends AdapterBase implements AsyncJobDispatcher { - private static final Logger s_logger = Logger.getLogger(ApiAsyncJobDispatcher.class); @Inject private ApiDispatcher _dispatcher; @@ -118,7 +116,7 @@ public class ApiAsyncJobDispatcher extends AdapterBase implements AsyncJobDispat String errorMsg = null; int errorCode = ApiErrorCode.INTERNAL_ERROR.getHttpCode(); if (!(e instanceof ServerApiException)) { - s_logger.error("Unexpected exception while executing " + job.getCmd(), e); + logger.error("Unexpected exception while executing " + job.getCmd(), e); errorMsg = e.getMessage(); } else { ServerApiException sApiEx = (ServerApiException)e; diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index 4da8b1e1be8..96cc5e527b8 100644 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -16,45 +16,44 @@ // under the License. package com.cloud.api; -import com.cloud.api.dispatch.DispatchChainFactory; -import com.cloud.api.dispatch.DispatchTask; -import com.cloud.api.response.ApiResponseSerializer; -import com.cloud.configuration.Config; -import com.cloud.domain.Domain; -import com.cloud.domain.DomainVO; -import com.cloud.domain.dao.DomainDao; -import com.cloud.event.ActionEventUtils; -import com.cloud.event.EventCategory; -import com.cloud.event.EventTypes; -import com.cloud.exception.AccountLimitException; -import com.cloud.exception.CloudAuthenticationException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.RequestLimitException; -import com.cloud.exception.ResourceAllocationException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.user.Account; -import com.cloud.user.AccountManager; -import com.cloud.user.DomainManager; -import 
com.cloud.user.User; -import com.cloud.user.UserAccount; -import com.cloud.user.UserVO; -import com.cloud.utils.ConstantTimeComparator; -import com.cloud.utils.HttpUtils; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.Pair; -import com.cloud.utils.StringUtils; -import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.component.PluggableService; -import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.db.EntityManager; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.TransactionLegacy; -import com.cloud.utils.db.UUIDManager; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.exception.ExceptionProxyObject; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.net.InetAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLEncoder; +import java.security.SecureRandom; +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TimeZone; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; + import 
org.apache.cloudstack.acl.APIChecker; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -134,51 +133,51 @@ import org.apache.http.protocol.ResponseConnControl; import org.apache.http.protocol.ResponseContent; import org.apache.http.protocol.ResponseDate; import org.apache.http.protocol.ResponseServer; -import org.apache.log4j.Logger; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; -import javax.crypto.Mac; -import javax.crypto.spec.SecretKeySpec; -import javax.inject.Inject; -import javax.naming.ConfigurationException; -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpSession; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.net.InetAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URLEncoder; -import java.security.SecureRandom; -import java.text.DateFormat; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TimeZone; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; +import com.cloud.api.dispatch.DispatchChainFactory; +import com.cloud.api.dispatch.DispatchTask; +import com.cloud.api.response.ApiResponseSerializer; +import com.cloud.configuration.Config; +import com.cloud.domain.Domain; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; 
+import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventCategory; +import com.cloud.event.EventTypes; +import com.cloud.exception.AccountLimitException; +import com.cloud.exception.CloudAuthenticationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.RequestLimitException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.DomainManager; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.user.UserVO; +import com.cloud.utils.ConstantTimeComparator; +import com.cloud.utils.HttpUtils; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.component.PluggableService; +import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.UUIDManager; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExceptionProxyObject; @Component public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiServerService { - private static final Logger s_logger = Logger.getLogger(ApiServer.class.getName()); - private static final Logger s_accessLogger = Logger.getLogger("apiserver." 
+ ApiServer.class.getName()); public static boolean encodeApiResponse = false; public static boolean s_enableSecureCookie = false; @@ -243,8 +242,8 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer AsyncJob job = eventInfo.first(); String jobEvent = eventInfo.second(); - if (s_logger.isTraceEnabled()) - s_logger.trace("Handle asyjob publish event " + jobEvent); + if (logger.isTraceEnabled()) + logger.trace("Handle asyjob publish event " + jobEvent); EventBus eventBus = null; try { @@ -269,11 +268,11 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer if (begin >= 0) { cmdEventType = info.substring(begin + marker.length() + 2, info.indexOf(",", begin) - 1); - if (s_logger.isDebugEnabled()) - s_logger.debug("Retrieved cmdEventType from job info: " + cmdEventType); + if (logger.isDebugEnabled()) + logger.debug("Retrieved cmdEventType from job info: " + cmdEventType); } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Unable to locate cmdEventType marker in job info. publish as unknown event"); + if (logger.isDebugEnabled()) + logger.debug("Unable to locate cmdEventType marker in job info. publish as unknown event"); } } // For some reason, the instanceType / instanceId are not abstract, which means we may get null values. 
@@ -310,7 +309,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer eventBus.publish(event); } catch (EventBusException evx) { String errMsg = "Failed to publish async job event on the the event bus."; - s_logger.warn(errMsg, evx); + logger.warn(errMsg, evx); } } @@ -332,7 +331,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer if (strSnapshotLimit != null) { final Long snapshotLimit = NumbersUtil.parseLong(strSnapshotLimit, 1L); if (snapshotLimit.longValue() <= 0) { - s_logger.debug("Global config parameter " + Config.ConcurrentSnapshotsThresholdPerHost.toString() + " is less or equal 0; defaulting to unlimited"); + logger.debug("Global config parameter " + Config.ConcurrentSnapshotsThresholdPerHost.toString() + " is less or equal 0; defaulting to unlimited"); } else { _dispatcher.setCreateSnapshotQueueSizeLimit(snapshotLimit); } @@ -341,8 +340,8 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer final Set> cmdClasses = new HashSet>(); for (final PluggableService pluggableService : _pluggableServices) { cmdClasses.addAll(pluggableService.getCommands()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Discovered plugin " + pluggableService.getClass().getSimpleName()); + if (logger.isDebugEnabled()) { + logger.debug("Discovered plugin " + pluggableService.getClass().getSimpleName()); } } @@ -400,7 +399,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer try { paramList = URLEncodedUtils.parse(new URI(request.getRequestLine().getUri()), HttpUtils.UTF_8); } catch (final URISyntaxException e) { - s_logger.error("Error parsing url request", e); + logger.error("Error parsing url request", e); } // Use Multimap as the parameter map should be in the form (name=String, value=String[]) @@ -442,11 +441,10 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer sb.append(" " + se.getErrorCode() + " " + 
se.getDescription()); } catch (final RuntimeException e) { // log runtime exception like NullPointerException to help identify the source easier - s_logger.error("Unhandled exception, ", e); + logger.error("Unhandled exception, ", e); throw e; } } finally { - s_accessLogger.info(sb.toString()); CallContext.unregister(); } } @@ -483,13 +481,13 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer try { command = (String[])params.get("command"); if (command == null) { - s_logger.error("invalid request, no command sent"); - if (s_logger.isTraceEnabled()) { - s_logger.trace("dumping request parameters"); + logger.error("invalid request, no command sent"); + if (logger.isTraceEnabled()) { + logger.trace("dumping request parameters"); for (final Object key : params.keySet()) { final String keyStr = (String)key; final String[] value = (String[])params.get(key); - s_logger.trace(" key: " + keyStr + ", value: " + ((value == null) ? "'null'" : value[0])); + logger.trace(" key: " + keyStr + ", value: " + ((value == null) ? 
"'null'" : value[0])); } } throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "Invalid request, no command sent"); @@ -514,7 +512,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer if (cmdClass != null) { APICommand annotation = cmdClass.getAnnotation(APICommand.class); if (annotation == null) { - s_logger.error("No APICommand annotation found for class " + cmdClass.getCanonicalName()); + logger.error("No APICommand annotation found for class " + cmdClass.getCanonicalName()); throw new CloudRuntimeException("No APICommand annotation found for class " + cmdClass.getCanonicalName()); } @@ -536,16 +534,16 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer buildAuditTrail(auditTrailSb, command[0], response); } else { final String errorString = "Unknown API command: " + command[0]; - s_logger.warn(errorString); + logger.warn(errorString); auditTrailSb.append(" " + errorString); throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, errorString); } } } catch (final InvalidParameterValueException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage(), ex); } catch (final IllegalArgumentException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage(), ex); } catch (final PermissionDeniedException ex) { final ArrayList idList = ex.getIdProxyList(); @@ -557,16 +555,16 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer buf.append(obj.getUuid()); buf.append(" "); } - s_logger.info("PermissionDenied: " + ex.getMessage() + " on objs: [" + buf.toString() + "]"); + logger.info("PermissionDenied: " + ex.getMessage() + " on objs: [" + buf.toString() + "]"); } else { - s_logger.info("PermissionDenied: " + ex.getMessage()); + logger.info("PermissionDenied: " + ex.getMessage()); } 
throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, ex.getMessage(), ex); } catch (final AccountLimitException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.ACCOUNT_RESOURCE_LIMIT_ERROR, ex.getMessage(), ex); } catch (final InsufficientCapacityException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); String errorMsg = ex.getMessage(); if (!_accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId())) { // hide internal details to non-admin user for security reason @@ -574,10 +572,10 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, errorMsg, ex); } catch (final ResourceAllocationException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage(), ex); } catch (final ResourceUnavailableException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); String errorMsg = ex.getMessage(); if (!_accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId())) { // hide internal details to non-admin user for security reason @@ -585,10 +583,10 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, errorMsg, ex); } catch (final ServerApiException ex) { - s_logger.info(ex.getDescription()); + logger.info(ex.getDescription()); throw ex; } catch (final Exception ex) { - s_logger.error("unhandled exception executing api command: " + ((command == null) ? "null" : command), ex); + logger.error("unhandled exception executing api command: " + ((command == null) ? 
"null" : command), ex); String errorMsg = ex.getMessage(); if (!_accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId())) { // hide internal details to non-admin user for security reason @@ -682,14 +680,14 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer AsyncJobVO job = new AsyncJobVO("", callerUserId, caller.getId(), cmdObj.getClass().getName(), ApiGsonHelper.getBuilder().create().toJson(params), instanceId, asyncCmd.getInstanceType() != null ? asyncCmd.getInstanceType().toString() : null, - injectedJobId); + injectedJobId); job.setDispatcher(_asyncDispatcher.getName()); final long jobId = _asyncMgr.submitAsyncJob(job); if (jobId == 0L) { final String errorMsg = "Unable to schedule async job for command " + job.getCmd(); - s_logger.warn(errorMsg); + logger.warn(errorMsg); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } @@ -780,7 +778,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer final String[] command = (String[])requestParameters.get(ApiConstants.COMMAND); if (command == null) { - s_logger.info("missing command, ignoring request..."); + logger.info("missing command, ignoring request..."); return false; } @@ -793,17 +791,17 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer try { checkCommandAvailable(user, commandName); } catch (final RequestLimitException ex) { - s_logger.debug(ex.getMessage()); + logger.debug(ex.getMessage()); throw new ServerApiException(ApiErrorCode.API_LIMIT_EXCEED, ex.getMessage()); } catch (final PermissionDeniedException ex) { - s_logger.debug("The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); + logger.debug("The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "The given command does not exist or it is not available 
for user"); } return true; } else { // check against every available command to see if the command exists or not if (!s_apiNameCmdClassMap.containsKey(commandName) && !commandName.equals("login") && !commandName.equals("logout")) { - s_logger.debug("The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); + logger.debug("The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "The given command does not exist or it is not available for user"); } } @@ -846,7 +844,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer // if api/secret key are passed to the parameters if ((signature == null) || (apiKey == null)) { - s_logger.debug("Expired session, missing signature, or missing apiKey -- ignoring request. Signature: " + signature + ", apiKey: " + apiKey); + logger.debug("Expired session, missing signature, or missing apiKey -- ignoring request. Signature: " + signature + ", apiKey: " + apiKey); return false; // no signature, bad request } @@ -855,20 +853,20 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer if ("3".equals(signatureVersion)) { // New signature authentication. Check for expire parameter and its validity if (expires == null) { - s_logger.debug("Missing Expires parameter -- ignoring request. Signature: " + signature + ", apiKey: " + apiKey); + logger.debug("Missing Expires parameter -- ignoring request. 
Signature: " + signature + ", apiKey: " + apiKey); return false; } synchronized (DateFormatToUse) { try { expiresTS = DateFormatToUse.parse(expires); } catch (final ParseException pe) { - s_logger.debug("Incorrect date format for Expires parameter", pe); + logger.debug("Incorrect date format for Expires parameter", pe); return false; } } final Date now = new Date(System.currentTimeMillis()); if (expiresTS.before(now)) { - s_logger.debug("Request expired -- ignoring ...sig: " + signature + ", apiKey: " + apiKey); + logger.debug("Request expired -- ignoring ...sig: " + signature + ", apiKey: " + apiKey); return false; } } @@ -879,7 +877,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer // verify there is a user with this api key final Pair userAcctPair = _accountMgr.findUserByApiKey(apiKey); if (userAcctPair == null) { - s_logger.debug("apiKey does not map to a valid user -- ignoring request, apiKey: " + apiKey); + logger.debug("apiKey does not map to a valid user -- ignoring request, apiKey: " + apiKey); return false; } @@ -887,7 +885,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer final Account account = userAcctPair.second(); if (user.getState() != Account.State.enabled || !account.getState().equals(Account.State.enabled)) { - s_logger.info("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + + logger.info("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + "; accountState: " + account.getState()); return false; } @@ -895,10 +893,10 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer try { checkCommandAvailable(user, commandName); } catch (final RequestLimitException ex) { - s_logger.debug(ex.getMessage()); + logger.debug(ex.getMessage()); throw new 
ServerApiException(ApiErrorCode.API_LIMIT_EXCEED, ex.getMessage()); } catch (final PermissionDeniedException ex) { - s_logger.debug("The given command:" + commandName + " does not exist or it is not available for user"); + logger.debug("The given command:" + commandName + " does not exist or it is not available for user"); throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); } @@ -906,7 +904,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer // verify secret key exists secretKey = user.getSecretKey(); if (secretKey == null) { - s_logger.info("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); + logger.info("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); return false; } @@ -922,7 +920,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer final boolean equalSig = ConstantTimeComparator.compareStrings(signature, computedSignature); if (!equalSig) { - s_logger.info("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); + logger.info("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); } else { CallContext.register(user, account); } @@ -930,7 +928,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } catch (final ServerApiException ex) { throw ex; } catch (final Exception ex) { - s_logger.error("unable to verify request signature"); + logger.error("unable to verify request signature"); } return false; } @@ -1018,13 +1016,13 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer float offsetInHrs = 0f; if (timezone != null) { final TimeZone t = TimeZone.getTimeZone(timezone); - s_logger.info("Current user 
logged in under " + timezone + " timezone"); + logger.info("Current user logged in under " + timezone + " timezone"); final java.util.Date date = new java.util.Date(); final long longDate = date.getTime(); final float offsetInMs = (t.getOffset(longDate)); offsetInHrs = offsetInMs / (1000 * 60 * 60); - s_logger.info("Timezone offset from UTC is: " + offsetInHrs); + logger.info("Timezone offset from UTC is: " + offsetInHrs); } final Account account = _accountMgr.getAccount(userAcct.getAccountId()); @@ -1086,7 +1084,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.enabled) || (account == null) || !account.getState().equals(Account.State.enabled)) { - s_logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); + logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); return false; } return true; @@ -1157,7 +1155,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } resp.setEntity(body); } catch (final Exception ex) { - s_logger.error("error!", ex); + logger.error("error!", ex); } } @@ -1166,7 +1164,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer // we have to cite a license if we are using this code directly, so we need to add the appropriate citation or // modify the // code to be very specific to our needs - static class ListenerThread extends Thread { + class ListenerThread extends Thread { private HttpService _httpService = null; private ServerSocket _serverSocket = null; private HttpParams _params = null; @@ -1175,16 +1173,16 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer try { _serverSocket = new ServerSocket(port); } catch (final IOException ioex) { - s_logger.error("error initializing api server", ioex); + logger.error("error initializing api 
server", ioex); return; } _params = new BasicHttpParams(); _params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 30000) - .setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024) - .setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false) - .setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true) - .setParameter(CoreProtocolPNames.ORIGIN_SERVER, "HttpComponents/1.1"); + .setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024) + .setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false) + .setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true) + .setParameter(CoreProtocolPNames.ORIGIN_SERVER, "HttpComponents/1.1"); // Set up the HTTP protocol processor final BasicHttpProcessor httpproc = new BasicHttpProcessor(); @@ -1205,7 +1203,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer @Override public void run() { - s_logger.info("ApiServer listening on port " + _serverSocket.getLocalPort()); + logger.info("ApiServer listening on port " + _serverSocket.getLocalPort()); while (!Thread.interrupted()) { try { // Set up HTTP connection @@ -1218,14 +1216,14 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } catch (final InterruptedIOException ex) { break; } catch (final IOException e) { - s_logger.error("I/O error initializing connection thread", e); + logger.error("I/O error initializing connection thread", e); break; } } } } - static class WorkerTask extends ManagedContextRunnable { + class WorkerTask extends ManagedContextRunnable { private final HttpService _httpService; private final HttpServerConnection _conn; @@ -1243,15 +1241,15 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer _conn.close(); } } catch (final ConnectionClosedException ex) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ApiServer: Client closed connection"); + if (logger.isTraceEnabled()) { + logger.trace("ApiServer: Client closed 
connection"); } } catch (final IOException ex) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ApiServer: IOException - " + ex); + if (logger.isTraceEnabled()) { + logger.trace("ApiServer: IOException - " + ex); } } catch (final HttpException ex) { - s_logger.warn("ApiServer: Unrecoverable HTTP protocol violation" + ex); + logger.warn("ApiServer: Unrecoverable HTTP protocol violation" + ex); } finally { try { _conn.shutdown(); @@ -1291,7 +1289,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer responseText = ApiResponseSerializer.toSerializedString(apiResponse, responseType); } catch (final Exception e) { - s_logger.error("Exception responding to http request", e); + logger.error("Exception responding to http request", e); } return responseText; } @@ -1341,7 +1339,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer responseText = ApiResponseSerializer.toSerializedString(apiResponse, responseType); } catch (final Exception e) { - s_logger.error("Exception responding to http request", e); + logger.error("Exception responding to http request", e); } return responseText; } diff --git a/server/src/com/cloud/api/auth/APIAuthenticationManagerImpl.java b/server/src/com/cloud/api/auth/APIAuthenticationManagerImpl.java index 28a9c0f5d46..4016ffc3e04 100644 --- a/server/src/com/cloud/api/auth/APIAuthenticationManagerImpl.java +++ b/server/src/com/cloud/api/auth/APIAuthenticationManagerImpl.java @@ -23,7 +23,6 @@ import java.util.concurrent.ConcurrentHashMap; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.auth.APIAuthenticationManager; @@ -36,7 +35,6 @@ import com.cloud.utils.component.ManagerBase; @Local(value = APIAuthenticationManager.class) @SuppressWarnings("unchecked") public class APIAuthenticationManagerImpl extends ManagerBase implements APIAuthenticationManager { - public static final Logger s_logger = 
Logger.getLogger(APIAuthenticationManagerImpl.class.getName()); private List _apiAuthenticators; @@ -84,7 +82,7 @@ public class APIAuthenticationManagerImpl extends ManagerBase implements APIAuth if (commands != null) { cmdList.addAll(commands); } else { - s_logger.warn("API Authenticator returned null api commands:" + apiAuthenticator.getName()); + logger.warn("API Authenticator returned null api commands:" + apiAuthenticator.getName()); } } return cmdList; @@ -100,8 +98,8 @@ public class APIAuthenticationManagerImpl extends ManagerBase implements APIAuth apiAuthenticator = ComponentContext.inject(apiAuthenticator); apiAuthenticator.setAuthenticators(_apiAuthenticators); } catch (InstantiationException | IllegalAccessException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("APIAuthenticationManagerImpl::getAPIAuthenticator failed: " + e.getMessage()); + if (logger.isDebugEnabled()) { + logger.debug("APIAuthenticationManagerImpl::getAPIAuthenticator failed: " + e.getMessage()); } } } diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index 6994b27327b..7aa46d16a90 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -107,7 +107,6 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.query.QueryService; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.query.dao.AccountJoinDao; @@ -227,7 +226,6 @@ import com.cloud.vm.dao.UserVmDetailsDao; @Local(value = {QueryService.class}) public class QueryManagerImpl extends ManagerBase implements QueryService, Configurable { - public static final Logger s_logger = Logger.getLogger(QueryManagerImpl.class); @Inject private AccountManager _accountMgr; @@ -1565,10 
+1563,10 @@ public class QueryManagerImpl extends ManagerBase implements QueryService, Confi // FIXME: do we need to support list hosts with VmId, maybe we should // create another command just for this // Right now it is handled separately outside this QueryService - s_logger.debug(">>>Searching for hosts>>>"); + logger.debug(">>>Searching for hosts>>>"); Pair, Integer> hosts = searchForServersInternal(cmd); ListResponse response = new ListResponse(); - s_logger.debug(">>>Generating Response>>>"); + logger.debug(">>>Generating Response>>>"); List hostResponses = ViewResponseHelper.createHostResponse(cmd.getDetails(), hosts.first().toArray(new HostJoinVO[hosts.first().size()])); response.setResponses(hostResponses, hosts.second()); return response; @@ -2562,7 +2560,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService, Confi domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if ( domainRecord == null ){ - s_logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account:" + account.getAccountName()); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -2733,13 +2731,13 @@ public class QueryManagerImpl extends ManagerBase implements QueryService, Confi UserVmVO vmInstance = _userVmDao.findById(vmId); domainRecord = _domainDao.findById(vmInstance.getDomainId()); if ( domainRecord == null ){ - s_logger.error("Could not find the domainId for vmId:" + vmId); + logger.error("Could not find the domainId for vmId:" + vmId); throw new CloudAuthenticationException("Could not find the domainId for vmId:" + vmId); } } else { domainRecord = _domainDao.findById(caller.getDomainId()); if ( domainRecord == null ){ - s_logger.error("Could not find the domainId for account:" + caller.getAccountName()); + logger.error("Could not find the 
domainId for account:" + caller.getAccountName()); throw new CloudAuthenticationException("Could not find the domainId for account:" + caller.getAccountName()); } } @@ -2885,7 +2883,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService, Confi List domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if (domainRecord == null) { - s_logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account:" + account.getAccountName()); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -2926,7 +2924,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService, Confi List domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if (domainRecord == null) { - s_logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account:" + account.getAccountName()); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -3118,14 +3116,14 @@ public class QueryManagerImpl extends ManagerBase implements QueryService, Confi throw new InvalidParameterValueException("Please specify a valid template ID."); }// If ISO requested then it should be ISO. if (isIso && template.getFormat() != ImageFormat.ISO) { - s_logger.error("Template Id " + templateId + " is not an ISO"); + logger.error("Template Id " + templateId + " is not an ISO"); InvalidParameterValueException ex = new InvalidParameterValueException( "Specified Template Id is not an ISO"); ex.addProxyObject(template.getUuid(), "templateId"); throw ex; }// If ISO not requested then it shouldn't be an ISO. 
if (!isIso && template.getFormat() == ImageFormat.ISO) { - s_logger.error("Incorrect format of the template id " + templateId); + logger.error("Incorrect format of the template id " + templateId); InvalidParameterValueException ex = new InvalidParameterValueException("Incorrect format " + template.getFormat() + " of the specified template id"); ex.addProxyObject(template.getUuid(), "templateId"); diff --git a/server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java index fadaed52874..4d5d9f719d4 100644 --- a/server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -43,7 +42,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {AccountJoinDao.class}) public class AccountJoinDaoImpl extends GenericDaoBase implements AccountJoinDao { - public static final Logger s_logger = Logger.getLogger(AccountJoinDaoImpl.class); private final SearchBuilder acctIdSearch; @Inject diff --git a/server/src/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java index cdd389ca72b..f1a6a721014 100644 --- a/server/src/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -36,7 +35,6 @@ import com.cloud.utils.db.SearchCriteria; @Local(value = {AffinityGroupJoinDao.class}) public class AffinityGroupJoinDaoImpl extends GenericDaoBase 
implements AffinityGroupJoinDao { - public static final Logger s_logger = Logger.getLogger(AffinityGroupJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java index a4cdb6718ca..b9f6db52716 100644 --- a/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -42,7 +41,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {DataCenterJoinDao.class}) public class DataCenterJoinDaoImpl extends GenericDaoBase implements DataCenterJoinDao { - public static final Logger s_logger = Logger.getLogger(DataCenterJoinDaoImpl.class); private SearchBuilder dofIdSearch; @Inject diff --git a/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java index a99f19dd0dd..7dea3f77b7f 100644 --- a/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.DiskOfferingResponse; @@ -36,7 +35,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {DiskOfferingJoinDao.class}) public class DiskOfferingJoinDaoImpl extends GenericDaoBase implements DiskOfferingJoinDao { - public static final Logger s_logger = Logger.getLogger(DiskOfferingJoinDaoImpl.class); private final SearchBuilder dofIdSearch; private final Attribute _typeAttr; diff --git 
a/server/src/com/cloud/api/query/dao/DomainJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DomainJoinDaoImpl.java index 220f8b64afe..b1917dd5352 100644 --- a/server/src/com/cloud/api/query/dao/DomainJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DomainJoinDaoImpl.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ResourceLimitAndCountResponse; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -37,7 +36,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value={DomainJoinDao.class}) public class DomainJoinDaoImpl extends GenericDaoBase implements DomainJoinDao { - public static final Logger s_logger = Logger.getLogger(DomainJoinDaoImpl.class); private SearchBuilder domainIdSearch; diff --git a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java index 6f7ef43048c..4f66a6b5bce 100644 --- a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.response.DomainRouterResponse; import org.apache.cloudstack.api.response.NicResponse; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiResponseHelper; @@ -44,7 +43,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {DomainRouterJoinDao.class}) public class DomainRouterJoinDaoImpl extends GenericDaoBase implements DomainRouterJoinDao { - public static final Logger s_logger = Logger.getLogger(DomainRouterJoinDaoImpl.class); @Inject private 
ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java index dca4de18814..bfebc926948 100644 --- a/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java @@ -28,7 +28,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiConstants.HostDetails; @@ -55,7 +54,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {HostJoinDao.class}) public class HostJoinDaoImpl extends GenericDaoBase implements HostJoinDao { - public static final Logger s_logger = Logger.getLogger(HostJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; @@ -197,7 +195,7 @@ public class HostJoinDaoImpl extends GenericDaoBase implements hostVoDetails = h.getDetails(); hostResponse.setDetails(hostVoDetails); } catch (Exception e) { - s_logger.debug("failed to get host details", e); + logger.debug("failed to get host details", e); } } diff --git a/server/src/com/cloud/api/query/dao/HostTagDaoImpl.java b/server/src/com/cloud/api/query/dao/HostTagDaoImpl.java index d73deb5b01c..1043802bde1 100644 --- a/server/src/com/cloud/api/query/dao/HostTagDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/HostTagDaoImpl.java @@ -24,7 +24,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.response.HostTagResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.query.vo.HostTagVO; @@ -35,7 +34,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {HostTagDao.class}) public class HostTagDaoImpl extends GenericDaoBase implements HostTagDao { - public static final Logger s_logger = Logger.getLogger(HostTagDaoImpl.class); @Inject 
private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java index b1e5025be50..b76a3ae365d 100644 --- a/server/src/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {ImageStoreJoinDao.class}) public class ImageStoreJoinDaoImpl extends GenericDaoBase implements ImageStoreJoinDao { - public static final Logger s_logger = Logger.getLogger(ImageStoreJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/InstanceGroupJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/InstanceGroupJoinDaoImpl.java index 3a580a46cd2..1766451745b 100644 --- a/server/src/com/cloud/api/query/dao/InstanceGroupJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/InstanceGroupJoinDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.InstanceGroupResponse; @@ -35,7 +34,6 @@ import com.cloud.vm.InstanceGroup; @Component @Local(value = {InstanceGroupJoinDao.class}) public class InstanceGroupJoinDaoImpl extends GenericDaoBase implements InstanceGroupJoinDao { - public static final Logger s_logger = Logger.getLogger(InstanceGroupJoinDaoImpl.class); private SearchBuilder vrIdSearch; diff --git a/server/src/com/cloud/api/query/dao/ProjectAccountJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ProjectAccountJoinDaoImpl.java index 676a995fdb2..44096c3a812 100644 --- 
a/server/src/com/cloud/api/query/dao/ProjectAccountJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ProjectAccountJoinDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.ProjectAccountResponse; @@ -34,7 +33,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {ProjectAccountJoinDao.class}) public class ProjectAccountJoinDaoImpl extends GenericDaoBase implements ProjectAccountJoinDao { - public static final Logger s_logger = Logger.getLogger(ProjectAccountJoinDaoImpl.class); private SearchBuilder paIdSearch; diff --git a/server/src/com/cloud/api/query/dao/ProjectInvitationJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ProjectInvitationJoinDaoImpl.java index 864fcf8ed4f..175273c9caf 100644 --- a/server/src/com/cloud/api/query/dao/ProjectInvitationJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ProjectInvitationJoinDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.ProjectInvitationResponse; @@ -34,7 +33,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {ProjectInvitationJoinDao.class}) public class ProjectInvitationJoinDaoImpl extends GenericDaoBase implements ProjectInvitationJoinDao { - public static final Logger s_logger = Logger.getLogger(ProjectInvitationJoinDaoImpl.class); private SearchBuilder piIdSearch; diff --git a/server/src/com/cloud/api/query/dao/ProjectJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ProjectJoinDaoImpl.java index 696487792b7..829fcb90994 100644 --- a/server/src/com/cloud/api/query/dao/ProjectJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ProjectJoinDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; 
-import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.ProjectResponse; @@ -42,7 +41,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {ProjectJoinDao.class}) public class ProjectJoinDaoImpl extends GenericDaoBase implements ProjectJoinDao { - public static final Logger s_logger = Logger.getLogger(ProjectJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index ff04c5bc7c3..272043a0cf9 100644 --- a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -40,7 +39,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {StoragePoolJoinDao.class}) public class StoragePoolJoinDaoImpl extends GenericDaoBase implements StoragePoolJoinDao { - public static final Logger s_logger = Logger.getLogger(StoragePoolJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/StorageTagDaoImpl.java b/server/src/com/cloud/api/query/dao/StorageTagDaoImpl.java index d1511f2d61f..3f1cb33756e 100644 --- a/server/src/com/cloud/api/query/dao/StorageTagDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/StorageTagDaoImpl.java @@ -24,7 +24,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.response.StorageTagResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import 
com.cloud.api.query.vo.StorageTagVO; @@ -35,7 +34,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {StorageTagDao.class}) public class StorageTagDaoImpl extends GenericDaoBase implements StorageTagDao { - public static final Logger s_logger = Logger.getLogger(StorageTagDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 15cefa54616..c568bef7817 100644 --- a/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -24,7 +24,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -56,7 +55,6 @@ import com.cloud.utils.db.SearchCriteria; @Local(value = {TemplateJoinDao.class}) public class TemplateJoinDaoImpl extends GenericDaoBase implements TemplateJoinDao { - public static final Logger s_logger = Logger.getLogger(TemplateJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java index 923a2382fdd..2a65daaf179 100644 --- a/server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.UserResponse; @@ -35,7 +34,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {UserAccountJoinDao.class}) public class UserAccountJoinDaoImpl extends GenericDaoBase implements UserAccountJoinDao { - public static final Logger s_logger = 
Logger.getLogger(UserAccountJoinDaoImpl.class); private SearchBuilder vrIdSearch; diff --git a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 9d883349d60..6520f5f0e64 100644 --- a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -28,7 +28,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -64,7 +63,6 @@ import com.cloud.vm.dao.UserVmDetailsDao; @Component @Local(value = {UserVmJoinDao.class}) public class UserVmJoinDaoImpl extends GenericDaoBase implements UserVmJoinDao { - public static final Logger s_logger = Logger.getLogger(UserVmJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java index 68a578fd218..c5eb6a93db8 100644 --- a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -45,7 +44,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {VolumeJoinDao.class}) public class VolumeJoinDaoImpl extends GenericDaoBase implements VolumeJoinDao { - public static final Logger s_logger = Logger.getLogger(VolumeJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java 
b/server/src/com/cloud/capacity/CapacityManagerImpl.java index 541c8475fb8..b81180de37d 100644 --- a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -29,7 +29,6 @@ import javax.naming.ConfigurationException; import com.cloud.resource.ResourceState; import com.cloud.utils.fsm.StateMachine2; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; @@ -104,7 +103,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Local(value = CapacityManager.class) public class CapacityManagerImpl extends ManagerBase implements CapacityManager, StateListener, Listener, ResourceListener, Configurable { - private static final Logger s_logger = Logger.getLogger(CapacityManagerImpl.class); @Inject CapacityDao _capacityDao; @Inject @@ -192,7 +190,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (hostId != null) { HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.warn("Host " + hostId + " no long exist anymore!"); + logger.warn("Host " + hostId + " no long exist anymore!"); return true; } @@ -224,9 +222,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long actualTotalMem = capacityMemory.getTotalCapacity(); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); - s_logger.debug("Hosts's actual total RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem); + if (logger.isDebugEnabled()) { + logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); + logger.debug("Hosts's actual total 
RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem); } if (!moveFromReserved) { @@ -255,11 +253,11 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } } - s_logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + + logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + capacityCpu.getReservedCapacity() + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); - s_logger.debug("release mem from host: " + hostId + ", old used: " + usedMem + ",reserved: " + reservedMem + ", total: " + totalMem + "; new used: " + + logger.debug("release mem from host: " + hostId + ", old used: " + usedMem + ",reserved: " + reservedMem + ", total: " + totalMem + "; new used: " + capacityMemory.getUsedCapacity() + ",reserved:" + capacityMemory.getReservedCapacity() + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); @@ -270,7 +268,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return true; } catch (Exception e) { - s_logger.debug("Failed to transit vm's state, due to " + e.getMessage()); + logger.debug("Failed to transit vm's state, due to " + e.getMessage()); return false; } } @@ -315,27 +313,27 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long actualTotalMem = capacityMem.getTotalCapacity(); long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); + if 
(logger.isDebugEnabled()) { + logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } long freeCpu = totalCpu - (reservedCpu + usedCpu); long freeMem = totalMem - (reservedMem + usedMem); - if (s_logger.isDebugEnabled()) { - s_logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId); - s_logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); - s_logger.debug("Current Used RAM: " + usedMem + " , Free RAM:" + freeMem + " ,Requested RAM: " + ram); + if (logger.isDebugEnabled()) { + logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId); + logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); + logger.debug("Current Used RAM: " + usedMem + " , Free RAM:" + freeMem + " ,Requested RAM: " + ram); } capacityCpu.setUsedCapacity(usedCpu + cpu); capacityMem.setUsedCapacity(usedMem + ram); if (fromLastHost) { /* alloc from reserved */ - if (s_logger.isDebugEnabled()) { - s_logger.debug("We are allocating VM to the last host again, so adjusting the reserved capacity if it is not less than required"); - s_logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu); - s_logger.debug("Reserved RAM: " + reservedMem + " , Requested RAM: " + ram); + if (logger.isDebugEnabled()) { + logger.debug("We are allocating VM to the last host again, so adjusting the reserved capacity if it is not less than required"); + logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu); + logger.debug("Reserved RAM: " + reservedMem + " , Requested RAM: " + ram); } if (reservedCpu >= cpu && reservedMem >= ram) { capacityCpu.setReservedCapacity(reservedCpu - cpu); @@ -344,18 +342,18 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } else { /* alloc from free resource */ if (!((reservedCpu + usedCpu + cpu <= totalCpu) 
&& (reservedMem + usedMem + ram <= totalMem))) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host doesnt seem to have enough free capacity, but increasing the used capacity anyways, " + + if (logger.isDebugEnabled()) { + logger.debug("Host doesnt seem to have enough free capacity, but increasing the used capacity anyways, " + "since the VM is already starting on this host "); } } } - s_logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu + ", actual total: " + + logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used:" + capacityCpu.getUsedCapacity() + ", reserved:" + capacityCpu.getReservedCapacity() + "; requested cpu:" + cpu + ",alloc_from_last:" + fromLastHost); - s_logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + usedMem + ", old reserved: " + reservedMem + ", total: " + + logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + usedMem + ", old reserved: " + reservedMem + ", total: " + totalMem + "; new used: " + capacityMem.getUsedCapacity() + ", reserved: " + capacityMem.getReservedCapacity() + "; requested mem: " + ram + ",alloc_from_last:" + fromLastHost); @@ -364,7 +362,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } }); } catch (Exception e) { - s_logger.error("Exception allocating VM capacity", e); + logger.error("Exception allocating VM capacity", e); return; } } @@ -377,14 +375,14 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, boolean isCpuNumGood = host.getCpus().intValue() >= cpuNum; boolean isCpuSpeedGood = host.getSpeed().intValue() >= cpuSpeed; if (isCpuNumGood && isCpuSpeedGood) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host: " + hostId + " has cpu 
capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + + if (logger.isDebugEnabled()) { + logger.debug("Host: " + hostId + " has cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); } return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host: " + hostId + " doesn't have cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + + if (logger.isDebugEnabled()) { + logger.debug("Host: " + hostId + " doesn't have cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); } return false; @@ -396,8 +394,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, boolean considerReservedCapacity) { boolean hasCapacity = false; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + ram + + if (logger.isDebugEnabled()) { + logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + ram + " , cpuOverprovisioningFactor: " + cpuOvercommitRatio); } @@ -406,13 +404,13 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (capacityCpu == null || capacityMem == null) { if (capacityCpu == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for CPU not found in Db, for hostId: " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for CPU not found in Db, for hostId: " + hostId); } } if (capacityMem == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for hostId: " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot 
checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for hostId: " + hostId); } } @@ -427,8 +425,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long actualTotalMem = capacityMem.getTotalCapacity(); long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); + if (logger.isDebugEnabled()) { + logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } String failureReason = ""; @@ -436,10 +434,10 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long freeCpu = reservedCpu; long freeMem = reservedMem; - if (s_logger.isDebugEnabled()) { - s_logger.debug("We need to allocate to the last host again, so checking if there is enough reserved capacity"); - s_logger.debug("Reserved CPU: " + freeCpu + " , Requested CPU: " + cpu); - s_logger.debug("Reserved RAM: " + freeMem + " , Requested RAM: " + ram); + if (logger.isDebugEnabled()) { + logger.debug("We need to allocate to the last host again, so checking if there is enough reserved capacity"); + logger.debug("Reserved CPU: " + freeCpu + " , Requested CPU: " + cpu); + logger.debug("Reserved RAM: " + freeMem + " , Requested RAM: " + ram); } /* alloc from reserved */ if (reservedCpu >= cpu) { @@ -457,8 +455,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long reservedMemValueToUse = reservedMem; if (!considerReservedCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("considerReservedCapacity is" + considerReservedCapacity + " , not considering reserved capacity for calculating free capacity"); + if (logger.isDebugEnabled()) { + logger.debug("considerReservedCapacity is" + considerReservedCapacity + " , not 
considering reserved capacity for calculating free capacity"); } reservedCpuValueToUse = 0; reservedMemValueToUse = 0; @@ -466,9 +464,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long freeCpu = totalCpu - (reservedCpuValueToUse + usedCpu); long freeMem = totalMem - (reservedMemValueToUse + usedMem); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Free CPU: " + freeCpu + " , Requested CPU: " + cpu); - s_logger.debug("Free RAM: " + freeMem + " , Requested RAM: " + ram); + if (logger.isDebugEnabled()) { + logger.debug("Free CPU: " + freeCpu + " , Requested CPU: " + cpu); + logger.debug("Free RAM: " + freeMem + " , Requested RAM: " + ram); } /* alloc from free resource */ if ((reservedCpuValueToUse + usedCpu + cpu <= totalCpu)) { @@ -483,29 +481,29 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } if (hasCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host has enough CPU and RAM available"); + if (logger.isDebugEnabled()) { + logger.debug("Host has enough CPU and RAM available"); } - s_logger.debug("STATS: Can alloc CPU from host: " + hostId + ", used: " + usedCpu + ", reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + + logger.debug("STATS: Can alloc CPU from host: " + hostId + ", used: " + usedCpu + ", reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; requested cpu:" + cpu + ",alloc_from_last_host?:" + checkFromReservedCapacity + " ,considerReservedCapacity?: " + considerReservedCapacity); - s_logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + usedMem + ", reserved: " + reservedMem + ", total: " + totalMem + + logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + usedMem + ", reserved: " + reservedMem + ", total: " + totalMem + "; requested mem: " + ram + ",alloc_from_last_host?:" + checkFromReservedCapacity + " ,considerReservedCapacity?: " + 
considerReservedCapacity); } else { if (checkFromReservedCapacity) { - s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", requested cpu: " + cpu + ", reservedMem: " + + logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", requested cpu: " + cpu + ", reservedMem: " + reservedMem + ", requested mem: " + ram); } else { - s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " + + logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " + cpu + ", actual total cpu: " + actualTotalCpu + ", total cpu with overprovisioning: " + totalCpu + ", reservedMem: " + reservedMem + ", used Mem: " + usedMem + ", requested mem: " + ram + ", total Mem:" + totalMem + " ,considerReservedCapacity?: " + considerReservedCapacity); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(failureReason + ", cannot allocate to this host."); + if (logger.isDebugEnabled()) { + logger.debug(failureReason + ", cannot allocate to this host."); } } @@ -593,8 +591,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, final CapacityState capacityState = (host.getResourceState() == ResourceState.Enabled) ? 
CapacityState.Enabled : CapacityState.Disabled; List vms = _vmDao.listUpByHostId(host.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found " + vms.size() + " VMs on host " + host.getId()); } ClusterVO cluster = _clusterDao.findById(host.getClusterId()); @@ -628,8 +626,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } List vmsByLastHostId = _vmDao.listByLastHostId(host.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId()); } for (VMInstanceVO vm : vmsByLastHostId) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000; @@ -680,50 +678,50 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long hostTotalCpu = host.getCpus().longValue() * host.getSpeed().longValue(); if (cpuCap.getTotalCapacity() != hostTotalCpu) { - s_logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCap.getTotalCapacity() + " new total CPU:" + hostTotalCpu); + logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCap.getTotalCapacity() + " new total CPU:" + hostTotalCpu); cpuCap.setTotalCapacity(hostTotalCpu); } // Set the capacity state as per the host allocation state. 
if(capacityState != cpuCap.getCapacityState()){ - s_logger.debug("Calibrate cpu capacity state for host: " + host.getId() + " old capacity state:" + cpuCap.getTotalCapacity() + " new capacity state:" + hostTotalCpu); + logger.debug("Calibrate cpu capacity state for host: " + host.getId() + " old capacity state:" + cpuCap.getTotalCapacity() + " new capacity state:" + hostTotalCpu); cpuCap.setCapacityState(capacityState); } memCap.setCapacityState(capacityState); if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) { - s_logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpu: " + cpuCap.getUsedCapacity() + " reservedCpu: " + + logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpu: " + cpuCap.getUsedCapacity() + " reservedCpu: " + cpuCap.getReservedCapacity()); } else { if (cpuCap.getReservedCapacity() != reservedCpu) { - s_logger.debug("Calibrate reserved cpu for host: " + host.getId() + " old reservedCpu:" + cpuCap.getReservedCapacity() + " new reservedCpu:" + + logger.debug("Calibrate reserved cpu for host: " + host.getId() + " old reservedCpu:" + cpuCap.getReservedCapacity() + " new reservedCpu:" + reservedCpu); cpuCap.setReservedCapacity(reservedCpu); } if (cpuCap.getUsedCapacity() != usedCpu) { - s_logger.debug("Calibrate used cpu for host: " + host.getId() + " old usedCpu:" + cpuCap.getUsedCapacity() + " new usedCpu:" + usedCpu); + logger.debug("Calibrate used cpu for host: " + host.getId() + " old usedCpu:" + cpuCap.getUsedCapacity() + " new usedCpu:" + usedCpu); cpuCap.setUsedCapacity(usedCpu); } } if (memCap.getTotalCapacity() != host.getTotalMemory()) { - s_logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + memCap.getTotalCapacity() + " new total memory:" + + logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + memCap.getTotalCapacity() + " new total memory:" + host.getTotalMemory()); 
memCap.setTotalCapacity(host.getTotalMemory()); } // Set the capacity state as per the host allocation state. if(capacityState != memCap.getCapacityState()){ - s_logger.debug("Calibrate memory capacity state for host: " + host.getId() + " old capacity state:" + memCap.getTotalCapacity() + " new capacity state:" + hostTotalCpu); + logger.debug("Calibrate memory capacity state for host: " + host.getId() + " old capacity state:" + memCap.getTotalCapacity() + " new capacity state:" + hostTotalCpu); memCap.setCapacityState(capacityState); } if (memCap.getUsedCapacity() == usedMemory && memCap.getReservedCapacity() == reservedMemory) { - s_logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + memCap.getUsedCapacity() + " reservedMem: " + + logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + memCap.getUsedCapacity() + " reservedMem: " + memCap.getReservedCapacity()); } else { if (memCap.getReservedCapacity() != reservedMemory) { - s_logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" + + logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" + reservedMemory); memCap.setReservedCapacity(reservedMemory); } @@ -733,7 +731,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, * state(starting/migrating) that I don't know on which host * they are allocated */ - s_logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + memCap.getUsedCapacity() + " new usedMem: " + usedMemory); + logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + memCap.getUsedCapacity() + " new usedMem: " + usedMemory); memCap.setUsedCapacity(usedMemory); } } @@ -742,7 +740,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, 
_capacityDao.update(cpuCap.getId(), cpuCap); _capacityDao.update(memCap.getId(), memCap); } catch (Exception e) { - s_logger.error("Caught exception while updating cpu/memory capacity for the host " + host.getId(), e); + logger.error("Caught exception while updating cpu/memory capacity for the host " + host.getId(), e); } } else { final long usedMemoryFinal = usedMemory; @@ -789,7 +787,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, State oldState = transition.getCurrentState(); State newState = transition.getToState(); Event event = transition.getEvent(); - s_logger.debug("VM state transitted from :" + oldState + " to " + newState + " with event: " + event + "vm's original host id: " + vm.getLastHostId() + + logger.debug("VM state transitted from :" + oldState + " to " + newState + " with event: " + event + "vm's original host id: " + vm.getLastHostId() + " new host id: " + vm.getHostId() + " host id before state transition: " + oldHostId); if (oldState == State.Starting) { @@ -832,7 +830,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if ((newState == State.Starting || newState == State.Migrating || event == Event.AgentReportMigrated) && vm.getHostId() != null) { boolean fromLastHost = false; if (vm.getHostId().equals(vm.getLastHostId())) { - s_logger.debug("VM starting again on the last host it was stopped on"); + logger.debug("VM starting again on the last host it was stopped on"); fromLastHost = true; } allocateVmCapacity(vm, fromLastHost); @@ -880,7 +878,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, CapacityVOCpu.setReservedCapacity(0); CapacityVOCpu.setTotalCapacity(newTotalCpu); } else { - s_logger.debug("What? new cpu is :" + newTotalCpu + ", old one is " + CapacityVOCpu.getUsedCapacity() + "," + CapacityVOCpu.getReservedCapacity() + + logger.debug("What? 
new cpu is :" + newTotalCpu + ", old one is " + CapacityVOCpu.getUsedCapacity() + "," + CapacityVOCpu.getReservedCapacity() + "," + CapacityVOCpu.getTotalCapacity()); } _capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu); @@ -907,7 +905,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, CapacityVOMem.setReservedCapacity(0); CapacityVOMem.setTotalCapacity(newTotalMem); } else { - s_logger.debug("What? new cpu is :" + newTotalMem + ", old one is " + CapacityVOMem.getUsedCapacity() + "," + CapacityVOMem.getReservedCapacity() + + logger.debug("What? new cpu is :" + newTotalMem + ", old one is " + CapacityVOMem.getUsedCapacity() + "," + CapacityVOMem.getReservedCapacity() + "," + CapacityVOMem.getTotalCapacity()); } _capacityDao.update(CapacityVOMem.getId(), CapacityVOMem); @@ -949,14 +947,14 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, float cpuConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_CPU, cpuRequested); if (cpuConsumption / clusterCpuOverProvisioning > clusterCpuCapacityDisableThreshold) { - s_logger.debug("Cluster: " + clusterId + " cpu consumption " + cpuConsumption / clusterCpuOverProvisioning + logger.debug("Cluster: " + clusterId + " cpu consumption " + cpuConsumption / clusterCpuOverProvisioning + " crosses disable threshold " + clusterCpuCapacityDisableThreshold); return true; } float memoryConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_MEMORY, ramRequested); if (memoryConsumption / clusterMemoryOverProvisioning > clusterMemoryCapacityDisableThreshold) { - s_logger.debug("Cluster: " + clusterId + " memory consumption " + memoryConsumption / clusterMemoryOverProvisioning + logger.debug("Cluster: " + clusterId + " memory consumption " + memoryConsumption / clusterMemoryOverProvisioning + " crosses disable threshold " + clusterMemoryCapacityDisableThreshold); return true; } @@ -1067,8 +1065,8 @@ public 
class CapacityManagerImpl extends ManagerBase implements CapacityManager, String hypervisorVersion = host.getHypervisorVersion(); Long maxGuestLimit = _hypervisorCapabilitiesDao.getMaxGuestsLimit(hypervisorType, hypervisorVersion); if (vmCount.longValue() >= maxGuestLimit.longValue()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " already reached max Running VMs(count includes system VMs), limit is: " + + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " already reached max Running VMs(count includes system VMs), limit is: " + maxGuestLimit + ",Running VM counts is: " + vmCount.longValue()); } return true; diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index fc7bff9c2aa..ee3d7a26aae 100644 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -37,7 +37,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.affinity.AffinityGroup; @@ -217,7 +216,6 @@ import com.cloud.vm.dao.NicSecondaryIpDao; @Local(value = {ConfigurationManager.class, ConfigurationService.class}) public class ConfigurationManagerImpl extends ManagerBase implements ConfigurationManager, ConfigurationService, Configurable { - public static final Logger s_logger = Logger.getLogger(ConfigurationManagerImpl.class); @Inject EntityManager _entityMgr; @@ -421,13 +419,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (mgtCidr == null || mgtCidr.trim().isEmpty()) { final String[] localCidrs = NetUtils.getLocalCidrs(); if (localCidrs != null && localCidrs.length > 0) { - s_logger.warn("Management 
network CIDR is not configured originally. Set it default to " + localCidrs[0]); + logger.warn("Management network CIDR is not configured originally. Set it default to " + localCidrs[0]); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGMENT_NODE, 0, new Long(0), "Management network CIDR is not configured originally. Set it default to " + localCidrs[0], ""); _configDao.update(Config.ManagementNetwork.key(), Config.ManagementNetwork.getCategory(), localCidrs[0]); } else { - s_logger.warn("Management network CIDR is not properly configured and we are not able to find a default setting"); + logger.warn("Management network CIDR is not properly configured and we are not able to find a default setting"); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGMENT_NODE, 0, new Long(0), "Management network CIDR is not properly configured and we are not able to find a default setting", ""); } @@ -447,7 +445,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final String validationMsg = validateConfigurationValue(name, value, scope); if (validationMsg != null) { - s_logger.error("Invalid configuration option, name: " + name + ", value:" + value); + logger.error("Invalid configuration option, name: " + name + ", value:" + value); throw new InvalidParameterValueException(validationMsg); } @@ -519,7 +517,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati txn.start(); if (!_configDao.update(name, category, value)) { - s_logger.error("Failed to update configuration option, name: " + name + ", value:" + value); + logger.error("Failed to update configuration option, name: " + name + ", value:" + value); throw new CloudRuntimeException("Failed to update configuration value. 
Please contact Cloud Support."); } @@ -622,7 +620,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // FIX ME - All configuration parameters are not moved from config.java to configKey if (config == null) { if (_configDepot.get(name) == null) { - s_logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface"); + logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface"); throw new InvalidParameterValueException("Config parameter with name " + name + " doesn't exist"); } catergory = _configDepot.get(name).category(); @@ -681,24 +679,24 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final ConfigurationVO cfg = _configDao.findByName(name); if (cfg == null) { - s_logger.error("Missing configuration variable " + name + " in configuration table"); + logger.error("Missing configuration variable " + name + " in configuration table"); return "Invalid configuration variable."; } final String configScope = cfg.getScope(); if (scope != null) { if (!configScope.contains(scope)) { - s_logger.error("Invalid scope id provided for the parameter " + name); + logger.error("Invalid scope id provided for the parameter " + name); return "Invalid scope id provided for the parameter " + name; } } Class type = null; final Config c = Config.getConfig(name); if (c == null) { - s_logger.warn("Did not find configuration " + name + " in Config.java. Perhaps moved to ConfigDepot"); + logger.warn("Did not find configuration " + name + " in Config.java. 
Perhaps moved to ConfigDepot"); final ConfigKey configKey = _configDepot.get(name); if(configKey == null) { - s_logger.warn("Did not find configuration " + name + " in ConfigDepot too."); + logger.warn("Did not find configuration " + name + " in ConfigDepot too."); return null; } type = configKey.type(); @@ -717,7 +715,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } catch (final Exception e) { // catching generic exception as some throws NullPointerException and some throws NumberFormatExcpeion - s_logger.error(errMsg); + logger.error(errMsg); return errMsg; } @@ -727,7 +725,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } if (overprovisioningFactorsForValidation.contains(name)) { final String msg = "value cannot be null for the parameter " + name; - s_logger.error(msg); + logger.error(msg); return msg; } return null; @@ -737,18 +735,18 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati try { if (overprovisioningFactorsForValidation.contains(name) && Float.parseFloat(value) < 1f) { final String msg = name + " should be greater than or equal to 1"; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } } catch (final NumberFormatException e) { final String msg = "There was an error trying to parse the float value for: " + name; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } if (type.equals(Boolean.class)) { if (!(value.equals("true") || value.equals("false"))) { - s_logger.error("Configuration variable " + name + " is expecting true or false instead of " + value); + logger.error("Configuration variable " + name + " is expecting true or false instead of " + value); return "Please enter either 'true' or 'false'."; } return null; @@ -765,7 +763,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new 
InvalidParameterValueException("Please enter a value greater than 6 for the configuration parameter:" + name); } } catch (final NumberFormatException e) { - s_logger.error("There was an error trying to parse the integer value for:" + name); + logger.error("There was an error trying to parse the integer value for:" + name); throw new InvalidParameterValueException("There was an error trying to parse the integer value for:" + name); } } @@ -777,7 +775,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Please enter a value between 0 and 1 for the configuration parameter: " + name); } } catch (final NumberFormatException e) { - s_logger.error("There was an error trying to parse the float value for:" + name); + logger.error("There was an error trying to parse the float value for:" + name); throw new InvalidParameterValueException("There was an error trying to parse the float value for:" + name); } } @@ -797,16 +795,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (range.equals("privateip")) { try { if (!NetUtils.isSiteLocalAddress(value)) { - s_logger.error("privateip range " + value + " is not a site local address for configuration variable " + name); + logger.error("privateip range " + value + " is not a site local address for configuration variable " + name); return "Please enter a site local IP address."; } } catch (final NullPointerException e) { - s_logger.error("Error parsing ip address for " + name); + logger.error("Error parsing ip address for " + name); throw new InvalidParameterValueException("Error parsing ip address"); } } else if (range.equals("netmask")) { if (!NetUtils.isValidNetmask(value)) { - s_logger.error("netmask " + value + " is not a valid net mask for configuration variable " + name); + logger.error("netmask " + value + " is not a valid net mask for configuration variable " + name); return "Please enter a valid netmask."; } } 
else if (range.equals("hypervisorList")) { @@ -850,7 +848,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return null; } } - s_logger.error("configuration value for " + name + " is invalid"); + logger.error("configuration value for " + name + " is invalid"); return "Please enter : " + range; } @@ -858,14 +856,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final String[] options = range.split("-"); if (options.length != 2) { final String msg = "configuration range " + range + " for " + name + " is invalid"; - s_logger.error(msg); + logger.error(msg); return msg; } final int min = Integer.parseInt(options[0]); final int max = Integer.parseInt(options[1]); final int val = Integer.parseInt(value); if (val < min || val > max) { - s_logger.error("configuration value for " + name + " is invalid"); + logger.error("configuration value for " + name + " is invalid"); return "Please enter : " + range; } } @@ -1235,7 +1233,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } }); } catch (final Exception e) { - s_logger.error("Unable to edit pod due to " + e.getMessage(), e); + logger.error("Unable to edit pod due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to edit pod. 
Please contact Cloud Support."); } @@ -1763,7 +1761,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati _networkSvc.addTrafficTypeToPhysicalNetwork(mgmtPhyNetwork.getId(), TrafficType.Storage.toString(), "vlan", mgmtTraffic.getXenNetworkLabel(), mgmtTraffic.getKvmNetworkLabel(), mgmtTraffic.getVmwareNetworkLabel(), mgmtTraffic.getSimulatorNetworkLabel(), mgmtTraffic.getVlan(), mgmtTraffic.getHypervNetworkLabel(), mgmtTraffic.getOvm3NetworkLabel()); - s_logger.info("No storage traffic type was specified by admin, create default storage traffic on physical network " + mgmtPhyNetwork.getId() + logger.info("No storage traffic type was specified by admin, create default storage traffic on physical network " + mgmtPhyNetwork.getId() + " with same configure of management traffic type"); } } catch (final InvalidParameterValueException ex) { @@ -2895,7 +2893,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (supportsMultipleSubnets == null || !Boolean.valueOf(supportsMultipleSubnets)) { throw new InvalidParameterValueException("The Dhcp serivice provider for this network dose not support the dhcp across multiple subnets"); } - s_logger.info("adding a new subnet to the network " + network.getId()); + logger.info("adding a new subnet to the network " + network.getId()); } else if (sameSubnet != null) { // if it is same subnet the user might not send the vlan and the // netmask details. 
so we are @@ -3256,7 +3254,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override public VlanVO doInTransaction(final TransactionStatus status) { VlanVO vlan = new VlanVO(vlanType, vlanId, vlanGateway, vlanNetmask, zone.getId(), ipRange, networkId, physicalNetworkId, vlanIp6Gateway, vlanIp6Cidr, ipv6Range); - s_logger.debug("Saving vlan range " + vlan); + logger.debug("Saving vlan range " + vlan); vlan = _vlanDao.persist(vlan); // IPv6 use a used ip map, is different from ipv4, no need to save @@ -3319,8 +3317,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new CloudRuntimeException("Unable to acquire vlan configuration: " + vlanDbId); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("lock vlan " + vlanDbId + " is acquired"); + if (logger.isDebugEnabled()) { + logger.debug("lock vlan " + vlanDbId + " is acquired"); } for (final IPAddressVO ip : ips) { boolean success = true; @@ -3344,7 +3342,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati success = _ipAddrMgr.disassociatePublicIpAddress(ip.getId(), userId, caller); } if (!success) { - s_logger.warn("Some ip addresses failed to be released as a part of vlan " + vlanDbId + " removal"); + logger.warn("Some ip addresses failed to be released as a part of vlan " + vlanDbId + " removal"); } else { resourceCountToBeDecrement++; UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_IP_RELEASE, acctVln.get(0).getAccountId(), ip.getDataCenterId(), ip.getId(), @@ -3501,14 +3499,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (vlan == null) { throw new CloudRuntimeException("Unable to acquire vlan configuration: " + vlanDbId); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("lock vlan " + vlanDbId + " is acquired"); + if (logger.isDebugEnabled()) { + logger.debug("lock vlan " + vlanDbId + " is acquired"); } for (final IPAddressVO ip : ips) { // 
Disassociate allocated IP's that are not in use if (!ip.isOneToOneNat() && !ip.isSourceNat() && !(_firewallDao.countRulesByIpId(ip.getId()) > 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing Public IP addresses" + ip + " of vlan " + vlanDbId + " as part of Public IP" + " range release to the system pool"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing Public IP addresses" + ip + " of vlan " + vlanDbId + " as part of Public IP" + " range release to the system pool"); } success = success && _ipAddrMgr.disassociatePublicIpAddress(ip.getId(), userId, caller); } else { @@ -3516,7 +3514,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } if (!success) { - s_logger.warn("Some Public IP addresses that were not in use failed to be released as a part of" + " vlan " + vlanDbId + "release to the system pool"); + logger.warn("Some Public IP addresses that were not in use failed to be released as a part of" + " vlan " + vlanDbId + "release to the system pool"); } } finally { _vlanDao.releaseFromLockTable(vlanDbId); @@ -3799,8 +3797,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati public void checkDiskOfferingAccess(final Account caller, final DiskOffering dof) { for (final SecurityChecker checker : _secChecker) { if (checker.checkAccess(caller, dof)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + caller + " to disk offering:" + dof.getId() + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + caller + " to disk offering:" + dof.getId() + " by " + checker.getName()); } return; } else { @@ -3816,8 +3814,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati public void checkZoneAccess(final Account caller, final DataCenter zone) { for (final SecurityChecker checker : _secChecker) { if (checker.checkAccess(caller, zone)) { - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Access granted to " + caller + " to zone:" + zone.getId() + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + caller + " to zone:" + zone.getId() + " by " + checker.getName()); } return; } else { @@ -4000,7 +3998,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // dhcp provider and userdata provider should be same because vm will be contacting dhcp server for user data. if (dhcpProvider == null && IsVrUserdataProvider) { - s_logger.debug("User data provider VR can't be selected without VR as dhcp provider. In this case VM fails to contact the DHCP server for userdata"); + logger.debug("User data provider VR can't be selected without VR as dhcp provider. In this case VM fails to contact the DHCP server for userdata"); throw new InvalidParameterValueException("Without VR as dhcp provider, User data can't selected for VR. Please select VR as DHCP provider "); } @@ -4060,7 +4058,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // if Firewall service is missing, add Firewall service/provider // combination if (firewallProvider != null) { - s_logger.debug("Adding Firewall service with provider " + firewallProvider.getName()); + logger.debug("Adding Firewall service with provider " + firewallProvider.getName()); final Set firewallProviderSet = new HashSet(); firewallProviderSet.add(firewallProvider); serviceProviderMap.put(Service.Firewall, firewallProviderSet); @@ -4386,7 +4384,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati NetworkOfferingVO offering = offeringFinal; // 1) create network offering object - s_logger.debug("Adding network offering " + offering); + logger.debug("Adding network offering " + offering); offering.setConcurrentConnections(maxconn); offering.setKeepAliveEnabled(enableKeepAlive); offering = _networkOfferingDao.persist(offering, details); @@ -4402,7 +4400,7 @@ public class 
ConfigurationManagerImpl extends ManagerBase implements Configurati } final NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(offering.getId(), service, provider); _ntwkOffServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService + " with provider " + provider.getName()); + logger.trace("Added service for the network offering: " + offService + " with provider " + provider.getName()); } if (vpcOff) { @@ -4413,7 +4411,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } else { final NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(offering.getId(), service, null); _ntwkOffServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService + " with null provider"); + logger.trace("Added service for the network offering: " + offService + " with null provider"); } } } @@ -4835,7 +4833,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Check if the account exists final Account account = _accountDao.findEnabledAccount(accountName, domainId); if (account == null) { - s_logger.error("Unable to find account by name: " + accountName + " in domain " + domainId); + logger.error("Unable to find account by name: " + accountName + " in domain " + domainId); throw new InvalidParameterValueException("Account by name: " + accountName + " doesn't exist in domain " + domainId); } @@ -4955,11 +4953,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } }); } catch (final CloudRuntimeException e) { - s_logger.error(e); + logger.error(e); return false; } } else { - s_logger.trace("Account id=" + accountId + " has no account specific virtual ip ranges, nothing to release"); + logger.trace("Account id=" + accountId + " has no account specific virtual ip ranges, nothing to release"); } return true; } diff --git 
a/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java b/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java index 6824b9c731a..7fd3f4ca8ed 100644 --- a/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java +++ b/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java @@ -22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.security.keys.KeysManager; @@ -48,7 +47,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = {ConsoleProxyManager.class}) public class AgentBasedConsoleProxyManager extends ManagerBase implements ConsoleProxyManager { - private static final Logger s_logger = Logger.getLogger(AgentBasedConsoleProxyManager.class); @Inject protected HostDao _hostDao; @@ -101,8 +99,8 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring AgentBasedConsoleProxyManager"); + if (logger.isInfoEnabled()) { + logger.info("Start configuring AgentBasedConsoleProxyManager"); } Map configs = _configDao.getConfiguration("management-server", params); @@ -126,8 +124,8 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol _listener = new ConsoleProxyListener(new AgentBasedAgentHook(_instanceDao, _hostDao, _configDao, _ksMgr, _agentMgr, _keysMgr)); _agentMgr.registerForHostEvents(_listener, true, true, false); - if (s_logger.isInfoEnabled()) { - s_logger.info("AgentBasedConsoleProxyManager has been configured. SSL enabled: " + _sslEnabled); + if (logger.isInfoEnabled()) { + logger.info("AgentBasedConsoleProxyManager has been configured. 
SSL enabled: " + _sslEnabled); } return true; } @@ -140,22 +138,22 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { UserVmVO userVm = _userVmDao.findById(userVmId); if (userVm == null) { - s_logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); + logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); return null; } HostVO host = findHost(userVm); if (host != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assign embedded console proxy running at " + host.getName() + " to user vm " + userVmId + " with public IP " + host.getPublicIpAddress()); + if (logger.isDebugEnabled()) { + logger.debug("Assign embedded console proxy running at " + host.getName() + " to user vm " + userVmId + " with public IP " + host.getPublicIpAddress()); } // only private IP, public IP, host id have meaningful values, rest // of all are place-holder values String publicIp = host.getPublicIpAddress(); if (publicIp == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host " + host.getName() + "/" + host.getPrivateIpAddress() + + if (logger.isDebugEnabled()) { + logger.debug("Host " + host.getName() + "/" + host.getPrivateIpAddress() + " does not have public interface, we will return its private IP for cosole proxy."); } publicIp = host.getPrivateIpAddress(); @@ -169,7 +167,7 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain); } else { - s_logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); + logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); } return null; } diff --git 
a/server/src/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java b/server/src/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java index 869af13aced..3cf606b0a93 100644 --- a/server/src/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java +++ b/server/src/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -29,13 +28,12 @@ import com.cloud.vm.UserVmVO; @Local(value = {ConsoleProxyManager.class}) public class AgentBasedStandaloneConsoleProxyManager extends AgentBasedConsoleProxyManager { - private static final Logger s_logger = Logger.getLogger(AgentBasedStandaloneConsoleProxyManager.class); @Override public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { UserVmVO userVm = _userVmDao.findById(userVmId); if (userVm == null) { - s_logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); + logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); return null; } @@ -60,19 +58,19 @@ public class AgentBasedStandaloneConsoleProxyManager extends AgentBasedConsolePr } } if (allocatedHost == null) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Failed to find a console proxy at host: " + host.getName() + " and in the pod: " + host.getPodId() + " to user vm " + userVmId); + if (logger.isDebugEnabled()) + logger.debug("Failed to find a console proxy at host: " + host.getName() + " and in the pod: " + host.getPodId() + " to user vm " + userVmId); return null; } - if (s_logger.isDebugEnabled()) - s_logger.debug("Assign standalone console proxy running at " + allocatedHost.getName() + " to user vm " + userVmId + " with public IP " + + if (logger.isDebugEnabled()) + logger.debug("Assign standalone console proxy running at " + allocatedHost.getName() + " 
to user vm " + userVmId + " with public IP " + allocatedHost.getPublicIpAddress()); // only private IP, public IP, host id have meaningful values, rest of all are place-holder values String publicIp = allocatedHost.getPublicIpAddress(); if (publicIp == null) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Host " + allocatedHost.getName() + "/" + allocatedHost.getPrivateIpAddress() + + if (logger.isDebugEnabled()) + logger.debug("Host " + allocatedHost.getName() + "/" + allocatedHost.getPrivateIpAddress() + " does not have public interface, we will return its private IP for cosole proxy."); publicIp = allocatedHost.getPrivateIpAddress(); } @@ -83,7 +81,7 @@ public class AgentBasedStandaloneConsoleProxyManager extends AgentBasedConsolePr return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain); } else { - s_logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); + logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); } return null; } diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 54ae38248de..b1d221fdf04 100644 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -42,7 +42,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -154,7 +153,6 @@ import com.google.gson.GsonBuilder; // @Local(value = {ConsoleProxyManager.class, 
ConsoleProxyService.class}) public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxyManager, VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyManagerImpl.class); private static final int DEFAULT_CAPACITY_SCAN_INTERVAL = 30000; // 30 seconds private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC = 180; // 3 minutes @@ -267,7 +265,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy Gson gson = gb.create(); status = gson.fromJson(cmd.getLoadInfo(), ConsoleProxyStatus.class); } catch (Throwable e) { - s_logger.warn("Unable to parse load info from proxy, proxy vm id : " + cmd.getProxyVmId() + ", info : " + cmd.getLoadInfo()); + logger.warn("Unable to parse load info from proxy, proxy vm id : " + cmd.getProxyVmId() + ", info : " + cmd.getLoadInfo()); } if (status != null) { @@ -282,8 +280,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } _consoleProxyDao.update(cmd.getProxyVmId(), count, DateUtil.currentGMTTime(), details); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Unable to get console proxy load info, id : " + cmd.getProxyVmId()); + if (logger.isTraceEnabled()) { + logger.trace("Unable to get console proxy load info, id : " + cmd.getProxyVmId()); } _consoleProxyDao.update(cmd.getProxyVmId(), 0, DateUtil.currentGMTTime(), null); @@ -303,8 +301,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy HostVO host = _hostDao.findById(agentId); if (host.getType() == Type.ConsoleProxy) { String name = host.getName(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected, proxy: " + name); + if (logger.isInfoEnabled()) { + logger.info("Console proxy agent disconnected, proxy: " + name); } if (name != null && name.startsWith("v-")) { String[] tokens = name.split("-"); @@ -312,7 +310,7 @@ public class 
ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy try { proxyVmId = Long.parseLong(tokens[1]); } catch (NumberFormatException e) { - s_logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception " + e.getMessage(), e); return; } @@ -332,15 +330,15 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy /* * _capacityScanScheduler.execute(new Runnable() { - * public void run() { if(s_logger.isInfoEnabled()) - * s_logger.info("Stop console proxy " + + * public void run() { if(logger.isInfoEnabled()) + * logger.info("Stop console proxy " + * proxy.getName() + * " VM because of that the agent running inside it has disconnected" * ); stopProxy(proxy.getId()); } }); */ } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); + if (logger.isInfoEnabled()) { + logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); } } } else { @@ -356,7 +354,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy long proxyVmId = startupCmd.getProxyVmId(); ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(proxyVmId); if (consoleProxy == null) { - s_logger.info("Proxy " + proxyVmId + " is no longer in DB, skip sending startup command"); + logger.info("Proxy " + proxyVmId + " is no longer in DB, skip sending startup command"); return null; } @@ -374,13 +372,13 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } if (proxy.getPublicIpAddress() == null) { - s_logger.warn("Assigned console proxy does not have a valid public IP address"); + logger.warn("Assigned console proxy does not have a valid public IP address"); return null; } KeystoreVO ksVo = _ksDao.findByName(ConsoleProxyManager.CERTIFICATE_NAME); if (proxy.isSslEnabled() && ksVo == null) { - s_logger.warn("SSL 
enabled for console proxy but no server certificate found in database"); + logger.warn("SSL enabled for console proxy but no server certificate found in database"); } if (_staticPublicIp == null) { @@ -395,13 +393,13 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy VMInstanceVO vm = _instanceDao.findById(vmId); if (vm == null) { - s_logger.warn("VM " + vmId + " no longer exists, return a null proxy for vm:" + vmId); + logger.warn("VM " + vmId + " no longer exists, return a null proxy for vm:" + vmId); return null; } if (vm != null && vm.getState() != State.Running) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Detected that vm : " + vmId + " is not currently at running state, we will fail the proxy assignment for it"); + if (logger.isInfoEnabled()) { + logger.info("Detected that vm : " + vmId + " is not currently at running state, we will fail the proxy assignment for it"); } return null; } @@ -413,18 +411,18 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy if (proxy != null) { if (!isInAssignableState(proxy)) { - if (s_logger.isInfoEnabled()) { - s_logger.info("A previous assigned proxy is not assignable now, reassign console proxy for user vm : " + vmId); + if (logger.isInfoEnabled()) { + logger.info("A previous assigned proxy is not assignable now, reassign console proxy for user vm : " + vmId); } proxy = null; } else { if (_consoleProxyDao.getProxyActiveLoad(proxy.getId()) < _capacityPerProxy || hasPreviousSession(proxy, vm)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Assign previous allocated console proxy for user vm : " + vmId); + if (logger.isTraceEnabled()) { + logger.trace("Assign previous allocated console proxy for user vm : " + vmId); } if (proxy.getActiveSession() >= _capacityPerProxy) { - s_logger.warn("Assign overloaded proxy to user VM as previous session exists, user vm : " + vmId); + logger.warn("Assign overloaded proxy to user VM as previous session exists, 
user vm : " + vmId); } } else { proxy = null; @@ -440,12 +438,12 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy _allocProxyLock.unlock(); } } else { - s_logger.error("Unable to acquire synchronization lock to get/allocate proxy resource for vm :" + vmId + + logger.error("Unable to acquire synchronization lock to get/allocate proxy resource for vm :" + vmId + ". Previous console proxy allocation is taking too long"); } if (proxy == null) { - s_logger.warn("Unable to find or allocate console proxy resource"); + logger.warn("Unable to find or allocate console proxy resource"); return null; } @@ -485,7 +483,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy byte[] details = proxy.getSessionDetails(); status = gson.fromJson(details != null ? new String(details, Charset.forName("US-ASCII")) : null, ConsoleProxyStatus.class); } catch (Throwable e) { - s_logger.warn("Unable to parse proxy session details : " + Arrays.toString(proxy.getSessionDetails())); + logger.warn("Unable to parse proxy session details : " + Arrays.toString(proxy.getSessionDetails())); } if (status != null && status.getConnections() != null) { @@ -496,7 +494,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy try { taggedVmId = Long.parseLong(connections[i].tag); } catch (NumberFormatException e) { - s_logger.warn("Unable to parse console proxy connection info passed through tag: " + connections[i].tag, e); + logger.warn("Unable to parse console proxy connection info passed through tag: " + connections[i].tag, e); } } if (taggedVmId == vm.getId()) { @@ -515,7 +513,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy return false; } else { - s_logger.error("No proxy load info on an overloaded proxy ?"); + logger.error("No proxy load info on an overloaded proxy ?"); return false; } } @@ -542,33 +540,33 @@ public class ConsoleProxyManagerImpl extends ManagerBase 
implements ConsoleProxy // For VMs that are in Stopping, Starting, Migrating state, let client to wait by returning null // as sooner or later, Starting/Migrating state will be transited to Running and Stopping will be transited // to Stopped to allow Starting of it - s_logger.warn("Console proxy is not in correct state to be started: " + proxy.getState()); + logger.warn("Console proxy is not in correct state to be started: " + proxy.getState()); return null; } catch (StorageUnavailableException e) { - s_logger.warn("Exception while trying to start console proxy", e); + logger.warn("Exception while trying to start console proxy", e); return null; } catch (InsufficientCapacityException e) { - s_logger.warn("Exception while trying to start console proxy", e); + logger.warn("Exception while trying to start console proxy", e); return null; } catch (ResourceUnavailableException e) { - s_logger.warn("Exception while trying to start console proxy", e); + logger.warn("Exception while trying to start console proxy", e); return null; } catch (ConcurrentOperationException e) { - s_logger.warn("Runtime Exception while trying to start console proxy", e); + logger.warn("Runtime Exception while trying to start console proxy", e); return null; } catch (CloudRuntimeException e) { - s_logger.warn("Runtime Exception while trying to start console proxy", e); + logger.warn("Runtime Exception while trying to start console proxy", e); return null; } catch (OperationTimedoutException e) { - s_logger.warn("Runtime Exception while trying to start console proxy", e); + logger.warn("Runtime Exception while trying to start console proxy", e); return null; } } public ConsoleProxyVO assignProxyFromRunningPool(long dataCenterId) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Assign console proxy from running pool for request from data center : " + dataCenterId); + if (logger.isTraceEnabled()) { + logger.trace("Assign console proxy from running pool for request from data center : " + 
dataCenterId); } ConsoleProxyAllocator allocator = getCurrentAllocator(); @@ -582,10 +580,10 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy it.remove(); } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Running proxy pool size : " + runningList.size()); + if (logger.isTraceEnabled()) { + logger.trace("Running proxy pool size : " + runningList.size()); for (ConsoleProxyVO proxy : runningList) { - s_logger.trace("Running proxy instance : " + proxy.getHostName()); + logger.trace("Running proxy instance : " + proxy.getHostName()); } } @@ -595,20 +593,20 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy for (Pair p : l) { loadInfo.put(p.first(), p.second()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Running proxy instance allocation load { proxy id : " + p.first() + ", load : " + p.second() + "}"); + if (logger.isTraceEnabled()) { + logger.trace("Running proxy instance allocation load { proxy id : " + p.first() + ", load : " + p.second() + "}"); } } } Long allocated = allocator.allocProxy(runningList, loadInfo, dataCenterId); if (allocated == null) { - s_logger.debug("Unable to find a console proxy "); + logger.debug("Unable to find a console proxy "); return null; } return _consoleProxyDao.findById(allocated); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Empty running proxy pool for now in data center : " + dataCenterId); + if (logger.isTraceEnabled()) { + logger.trace("Empty running proxy pool for now in data center : " + dataCenterId); } } return null; @@ -629,12 +627,12 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy public ConsoleProxyVO startNew(long dataCenterId) throws ConcurrentOperationException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assign console proxy from a newly started instance for request from data center : " + dataCenterId); + if (logger.isDebugEnabled()) { + logger.debug("Assign console proxy 
from a newly started instance for request from data center : " + dataCenterId); } if (!allowToLaunchNew(dataCenterId)) { - s_logger.warn("The number of launched console proxy on zone " + dataCenterId + " has reached to limit"); + logger.warn("The number of launched console proxy on zone " + dataCenterId + " has reached to limit"); return null; } @@ -649,8 +647,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy long proxyVmId = (Long)context.get("proxyVmId"); if (proxyVmId == 0) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Creating proxy instance failed, data center id : " + dataCenterId); + if (logger.isTraceEnabled()) { + logger.trace("Creating proxy instance failed, data center id : " + dataCenterId); } return null; } @@ -661,8 +659,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy new ConsoleProxyAlertEventArgs(ConsoleProxyAlertEventArgs.PROXY_CREATED, dataCenterId, proxy.getId(), proxy, null)); return proxy; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to allocate console proxy storage, remove the console proxy record from DB, proxy id: " + proxyVmId); + if (logger.isDebugEnabled()) { + logger.debug("Unable to allocate console proxy storage, remove the console proxy record from DB, proxy id: " + proxyVmId); } } return null; @@ -724,7 +722,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy try { _itMgr.allocate(name, template, serviceOffering, networks, plan, null); } catch (InsufficientCapacityException e) { - s_logger.warn("InsufficientCapacity", e); + logger.warn("InsufficientCapacity", e); throw new CloudRuntimeException("Insufficient capacity exception", e); } @@ -758,7 +756,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy Gson gson = gb.create(); status = gson.fromJson(answer.getDetails(), ConsoleProxyStatus.class); } catch (Throwable e) { - s_logger.warn("Unable to parse load info 
from proxy, proxy vm id : " + answer.getProxyVmId() + ", info : " + answer.getDetails()); + logger.warn("Unable to parse load info from proxy, proxy vm id : " + answer.getProxyVmId() + ", info : " + answer.getDetails()); } if (status != null) { @@ -773,8 +771,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } _consoleProxyDao.update(answer.getProxyVmId(), count, DateUtil.currentGMTTime(), details); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Unable to get console proxy load info, id : " + answer.getProxyVmId()); + if (logger.isTraceEnabled()) { + logger.trace("Unable to get console proxy load info, id : " + answer.getProxyVmId()); } _consoleProxyDao.update(answer.getProxyVmId(), 0, DateUtil.currentGMTTime(), null); @@ -791,8 +789,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy HostVO host = _hostDao.findById(agentId); if (host.getType() == Type.ConsoleProxy) { String name = host.getName(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected, proxy: " + name); + if (logger.isInfoEnabled()) { + logger.info("Console proxy agent disconnected, proxy: " + name); } if (name != null && name.startsWith("v-")) { String[] tokens = name.split("-"); @@ -800,7 +798,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy try { proxyVmId = Long.parseLong(tokens[1]); } catch (NumberFormatException e) { - s_logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception " + e.getMessage(), e); return; } @@ -817,14 +815,14 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy /* * _capacityScanScheduler.execute(new Runnable() { public void run() { - * if(s_logger.isInfoEnabled()) - * s_logger.info("Stop console proxy " + proxy.getName() + + * if(logger.isInfoEnabled()) + * logger.info("Stop console proxy " + proxy.getName() + * " VM because of that the agent running 
inside it has disconnected" ); * stopProxy(proxy.getId()); } }); */ } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); + if (logger.isInfoEnabled()) { + logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); } } } else { @@ -855,8 +853,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy private boolean allowToLaunchNew(long dcId) { if (!isConsoleProxyVmRequired(dcId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console proxy vm not required in zone " + dcId + " not launching"); + if (logger.isDebugEnabled()) { + logger.debug("Console proxy vm not required in zone " + dcId + " not launching"); } return false; } @@ -879,8 +877,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } private void allocCapacity(long dataCenterId) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Allocate console proxy standby capacity for data center : " + dataCenterId); + if (logger.isTraceEnabled()) { + logger.trace("Allocate console proxy standby capacity for data center : " + dataCenterId); } ConsoleProxyVO proxy = null; @@ -889,26 +887,26 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy boolean consoleProxyVmFromStoppedPool = false; proxy = assignProxyFromStoppedPool(dataCenterId); if (proxy == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("No stopped console proxy is available, need to allocate a new console proxy"); + if (logger.isInfoEnabled()) { + logger.info("No stopped console proxy is available, need to allocate a new console proxy"); } if (_allocProxyLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC)) { try { proxy = startNew(dataCenterId); } catch (ConcurrentOperationException e) { - s_logger.info("Concurrent operation exception caught " + e); + 
logger.info("Concurrent operation exception caught " + e); } finally { _allocProxyLock.unlock(); } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to acquire synchronization lock for console proxy vm allocation, wait for next scan"); + if (logger.isInfoEnabled()) { + logger.info("Unable to acquire synchronization lock for console proxy vm allocation, wait for next scan"); } } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Found a stopped console proxy, starting it. Vm id : " + proxy.getId()); + if (logger.isInfoEnabled()) { + logger.info("Found a stopped console proxy, starting it. Vm id : " + proxy.getId()); } consoleProxyVmFromStoppedPool = true; } @@ -918,14 +916,14 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy proxy = startProxy(proxyVmId, false); if (proxy != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy " + proxy.getHostName() + " is started"); + if (logger.isInfoEnabled()) { + logger.info("Console proxy " + proxy.getHostName() + " is started"); } SubscriptionMgr.getInstance().notifySubscribers(ConsoleProxyManager.ALERT_SUBJECT, this, new ConsoleProxyAlertEventArgs(ConsoleProxyAlertEventArgs.PROXY_UP, dataCenterId, proxy.getId(), proxy, null)); } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to start console proxy vm for standby capacity, vm id : " + proxyVmId + ", will recycle it and start a new one"); + if (logger.isInfoEnabled()) { + logger.info("Unable to start console proxy vm for standby capacity, vm id : " + proxyVmId + ", will recycle it and start a new one"); } if (consoleProxyVmFromStoppedPool) { @@ -950,8 +948,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy if (zoneHostInfo != null && isZoneHostReady(zoneHostInfo)) { VMTemplateVO template = _templateDao.findSystemVMReadyTemplate(dataCenterId, HypervisorType.Any); if (template == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("System 
vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch console proxy vm"); + if (logger.isDebugEnabled()) { + logger.debug("System vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch console proxy vm"); } return false; } @@ -967,13 +965,13 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy if (l != null && l.size() > 0 && l.get(0).second().intValue() > 0) { return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Primary storage is not ready, wait until it is ready to launch console proxy"); + if (logger.isDebugEnabled()) { + logger.debug("Primary storage is not ready, wait until it is ready to launch console proxy"); } } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() + " is not ready on secondary storage."); + if (logger.isDebugEnabled()) { + logger.debug("Zone host is ready, but console proxy template: " + template.getId() + " is not ready on secondary storage."); } } } @@ -1007,8 +1005,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy @Override public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start console proxy manager"); + if (logger.isInfoEnabled()) { + logger.info("Start console proxy manager"); } return true; @@ -1016,8 +1014,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy @Override public boolean stop() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Stop console proxy manager"); + if (logger.isInfoEnabled()) { + logger.info("Stop console proxy manager"); } _loadScanner.stop(); @@ -1030,8 +1028,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy public boolean stopProxy(long proxyVmId) { ConsoleProxyVO proxy = _consoleProxyDao.findById(proxyVmId); if (proxy == null) { - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Stopping console proxy failed: console proxy " + proxyVmId + " no longer exists"); + if (logger.isDebugEnabled()) { + logger.debug("Stopping console proxy failed: console proxy " + proxyVmId + " no longer exists"); } return false; } @@ -1040,10 +1038,10 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy _itMgr.stop(proxy.getUuid()); return true; } catch (ResourceUnavailableException e) { - s_logger.warn("Stopping console proxy " + proxy.getHostName() + " failed : exception ", e); + logger.warn("Stopping console proxy " + proxy.getHostName() + " failed : exception ", e); return false; } catch (CloudRuntimeException e) { - s_logger.warn("Unable to stop proxy ", e); + logger.warn("Unable to stop proxy ", e); return false; } } @@ -1067,7 +1065,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy }); } } catch (Throwable e) { - s_logger.error("Failed to set managment state", e); + logger.error("Failed to set managment state", e); } } @@ -1078,12 +1076,12 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy ConsoleProxyManagementState state = ConsoleProxyManagementState.valueOf(value); if (state == null) { - s_logger.error("Invalid console proxy management state: " + value); + logger.error("Invalid console proxy management state: " + value); } return state; } - s_logger.error("Invalid console proxy management state: " + value); + logger.error("Invalid console proxy management state: " + value); return null; } @@ -1101,7 +1099,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy _configDao.update(Config.ConsoleProxyManagementState.key(), Config.ConsoleProxyManagementState.getCategory(), lastState.toString()); } } catch (Throwable e) { - s_logger.error("Failed to resume last management state", e); + logger.error("Failed to resume last management state", e); } } @@ -1111,12 +1109,12 @@ public class ConsoleProxyManagerImpl extends 
ManagerBase implements ConsoleProxy ConsoleProxyManagementState state = ConsoleProxyManagementState.valueOf(value); if (state == null) { - s_logger.error("Invalid console proxy management state: " + value); + logger.error("Invalid console proxy management state: " + value); } return state; } - s_logger.error("Invalid console proxy management state: " + value); + logger.error("Invalid console proxy management state: " + value); return null; } @@ -1133,8 +1131,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy final Answer answer = _agentMgr.easySend(proxy.getHostId(), cmd); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully reboot console proxy " + proxy.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully reboot console proxy " + proxy.getHostName()); } SubscriptionMgr.getInstance().notifySubscribers(ConsoleProxyManager.ALERT_SUBJECT, this, @@ -1142,8 +1140,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("failed to reboot console proxy : " + proxy.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("failed to reboot console proxy : " + proxy.getHostName()); } return false; @@ -1168,13 +1166,13 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy _consoleProxyDao.remove(vmId); HostVO host = _hostDao.findByTypeNameAndZoneId(proxy.getDataCenterId(), proxy.getHostName(), Host.Type.ConsoleProxy); if (host != null) { - s_logger.debug("Removing host entry for proxy id=" + vmId); + logger.debug("Removing host entry for proxy id=" + vmId); return _hostDao.remove(host.getId()); } return true; } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to expunge " + proxy, e); + logger.warn("Unable to expunge " + proxy, e); return false; } } @@ -1185,8 +1183,8 @@ public class ConsoleProxyManagerImpl 
extends ManagerBase implements ConsoleProxy @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring console proxy manager : " + name); + if (logger.isInfoEnabled()) { + logger.info("Start configuring console proxy manager : " + name); } Map configs = _configDao.getConfiguration("management-server", params); @@ -1199,7 +1197,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy _consoleProxyUrlDomain = configs.get(Config.ConsoleProxyUrlDomain.key()); if( _sslEnabled && (_consoleProxyUrlDomain == null || _consoleProxyUrlDomain.isEmpty())) { - s_logger.warn("Empty console proxy domain, explicitly disabling SSL"); + logger.warn("Empty console proxy domain, explicitly disabling SSL"); _sslEnabled = false; } @@ -1225,9 +1223,9 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy _useStorageVm = true; } - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy max session soft limit : " + _capacityPerProxy); - s_logger.info("Console proxy standby capacity : " + _standbyCapacity); + if (logger.isInfoEnabled()) { + logger.info("Console proxy max session soft limit : " + _capacityPerProxy); + logger.info("Console proxy standby capacity : " + _standbyCapacity); } _instance = configs.get("instance.name"); @@ -1253,11 +1251,11 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy try { _serviceOffering = _offeringDao.findById(Long.parseLong(cpvmSrvcOffIdStr)); } catch (NumberFormatException ex) { - s_logger.debug("The system service offering specified by global config is not id, but uuid=" + cpvmSrvcOffIdStr + " for console proxy vm"); + logger.debug("The system service offering specified by global config is not id, but uuid=" + cpvmSrvcOffIdStr + " for console proxy vm"); } } if (_serviceOffering == null) { - s_logger.warn("Can't find system service offering specified by 
global config, uuid=" + cpvmSrvcOffIdStr + " for console proxy vm"); + logger.warn("Can't find system service offering specified by global config, uuid=" + cpvmSrvcOffIdStr + " for console proxy vm"); } } @@ -1270,7 +1268,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy // this can sometimes happen, if DB is manually or programmatically manipulated if (offerings == null || offerings.size() < 2) { String msg = "Data integrity problem : System Offering For Console Proxy has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } @@ -1284,8 +1282,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy _staticPort = NumbersUtil.parseInt(_configDao.getValue("consoleproxy.static.port"), 8443); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Console Proxy Manager is configured."); + if (logger.isInfoEnabled()) { + logger.info("Console Proxy Manager is configured."); } return true; } @@ -1364,8 +1362,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } String bootArgs = buf.toString(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Boot Args for " + profile + ": " + bootArgs); + if (logger.isDebugEnabled()) { + logger.debug("Boot Args for " + profile + ": " + bootArgs); } return true; @@ -1409,7 +1407,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy if (controlNic == null) { if (managementNic == null) { - s_logger.error("Management network doesn't exist for the console proxy vm " + profile.getVirtualMachine()); + logger.error("Management network doesn't exist for the console proxy vm " + profile.getVirtualMachine()); return false; } controlNic = managementNic; @@ -1431,9 +1429,9 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy CheckSshAnswer answer = (CheckSshAnswer)cmds.getAnswer("checkSsh"); if (answer == null || !answer.getResult()) { if 
(answer != null) { - s_logger.warn("Unable to ssh to the VM: " + answer.getDetails()); + logger.warn("Unable to ssh to the VM: " + answer.getDetails()); } else { - s_logger.warn("Unable to ssh to the VM: null answer"); + logger.warn("Unable to ssh to the VM: null answer"); } return false; } @@ -1449,7 +1447,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy _consoleProxyDao.update(consoleVm.getId(), consoleVm); } } catch (Exception ex) { - s_logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); + logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); return false; } @@ -1476,7 +1474,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy try { _rulesMgr.disableStaticNat(ip.getId(), ctx.getCallingAccount(), ctx.getCallingUserId(), true); } catch (Exception ex) { - s_logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", + logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", ex); } } @@ -1528,14 +1526,14 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy private void handleResetSuspending() { List runningProxies = _consoleProxyDao.getProxyListInStates(State.Running); for (ConsoleProxyVO proxy : runningProxies) { - s_logger.info("Stop console proxy " + proxy.getId() + " because of we are currently in ResetSuspending management mode"); + logger.info("Stop console proxy " + proxy.getId() + " because of we are currently in ResetSuspending management mode"); stopProxy(proxy.getId()); } // check if it is time to resume List proxiesInTransition = _consoleProxyDao.getProxyListInStates(State.Running, State.Starting, State.Stopping); if 
(proxiesInTransition.size() == 0) { - s_logger.info("All previous console proxy VMs in transition mode ceased the mode, we will now resume to last management state"); + logger.info("All previous console proxy VMs in transition mode ceased the mode, we will now resume to last management state"); resumeLastManagementState(); } } @@ -1546,15 +1544,15 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy scanManagementState(); if (!reserveStandbyCapacity()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Reserving standby capacity is disabled, skip capacity scan"); + if (logger.isDebugEnabled()) { + logger.debug("Reserving standby capacity is disabled, skip capacity scan"); } return false; } List upPools = _storagePoolDao.listByStatus(StoragePoolStatus.Up); if (upPools == null || upPools.size() == 0) { - s_logger.debug("Skip capacity scan as there is no Primary Storage in 'Up' state"); + logger.debug("Skip capacity scan as there is no Primary Storage in 'Up' state"); return false; } @@ -1580,23 +1578,23 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy long dataCenterId = pool.longValue(); if (!isZoneReady(_zoneHostInfoMap, dataCenterId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " is not ready to launch console proxy yet"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " is not ready to launch console proxy yet"); } return false; } List l = _consoleProxyDao.getProxyListInStates(VirtualMachine.State.Starting, VirtualMachine.State.Stopping); if (l.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " has " + l.size() + " console proxy VM(s) in transition state"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " has " + l.size() + " console proxy VM(s) in transition state"); } return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " is 
ready to launch console proxy"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " is ready to launch console proxy"); } return true; } @@ -1616,8 +1614,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } if (!checkCapacity(proxyInfo, vmInfo)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expand console proxy standby capacity for zone " + proxyInfo.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Expand console proxy standby capacity for zone " + proxyInfo.getName()); } return new Pair(AfterScanAction.expand, null); diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java index bfd6af4b480..88c6ebca558 100644 --- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -33,7 +33,6 @@ import javax.naming.ConfigurationException; import com.cloud.utils.fsm.StateMachine2; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; @@ -135,7 +134,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener, StateListener { - private static final Logger s_logger = Logger.getLogger(DeploymentPlanningManagerImpl.class); @Inject AgentManager _agentMgr; @Inject @@ -261,16 +259,16 @@ StateListener { if (vm.getType() == VirtualMachine.Type.User) { checkForNonDedicatedResources(vmProfile, dc, avoids); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); + if (logger.isDebugEnabled()) { + logger.debug("Deploy avoids pods: " + 
avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); } // call planners //DataCenter dc = _dcDao.findById(vm.getDataCenterId()); // check if datacenter is in avoid set if (avoids.shouldAvoid(dc)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + if (logger.isDebugEnabled()) { + logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); } return null; } @@ -291,30 +289,30 @@ StateListener { int cpu_requested = offering.getCpu() * offering.getSpeed(); long ram_requested = offering.getRamSize() * 1024L * 1024L; - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlanner allocation algorithm: " + planner); + if (logger.isDebugEnabled()) { + logger.debug("DeploymentPlanner allocation algorithm: " + planner); - s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + + logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested); - s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No")); + logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? 
"Yes" : "No")); } String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); if (plan.getHostId() != null && haVmTag == null) { Long hostIdSpecified = plan.getHostId(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified); + if (logger.isDebugEnabled()) { + logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified); } HostVO host = _hostDao.findById(hostIdSpecified); if (host == null) { - s_logger.debug("The specified host cannot be found"); + logger.debug("The specified host cannot be found"); } else if (avoids.shouldAvoid(host)) { - s_logger.debug("The specified host is in avoid set"); + logger.debug("The specified host is in avoid set"); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + + if (logger.isDebugEnabled()) { + logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); } @@ -323,7 +321,7 @@ StateListener { if (vm.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap()); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } @@ -353,35 +351,35 @@ StateListener { storageVolMap.remove(vol); } DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } } } - s_logger.debug("Cannot deploy to specified host, returning."); + logger.debug("Cannot deploy to specified host, returning."); return 
null; } if (vm.getLastHostId() != null && haVmTag == null) { - s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); + logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); HostVO host = _hostDao.findById(vm.getLastHostId()); ServiceOfferingDetailsVO offeringDetails = null; if (host == null) { - s_logger.debug("The last host of this VM cannot be found"); + logger.debug("The last host of this VM cannot be found"); } else if (avoids.shouldAvoid(host)) { - s_logger.debug("The last host of this VM is in avoid set"); + logger.debug("The last host of this VM is in avoid set"); } else if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) { - s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + plan.getClusterId()); } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - s_logger.debug("The last Host, hostId: " + host.getId() + + logger.debug("The last Host, hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) { ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - s_logger.debug("The last host of this VM does not have required GPU devices available"); + logger.debug("The last host of this VM does not have required GPU devices available"); } } else { if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { @@ -415,15 +413,15 @@ 
StateListener { if (hostHasCapacity && hostHasCpuCapability) { - s_logger.debug("The last host of this VM is UP and has enough capacity"); - s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + logger.debug("The last host of this VM is UP and has enough capacity"); + logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); if (vm.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap()); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } @@ -456,22 +454,22 @@ StateListener { } DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } } } else { - s_logger.debug("The last host of this VM does not have enough capacity"); + logger.debug("The last host of this VM does not have enough capacity"); } } else { - s_logger.debug("Service Offering host tag does not match the last host of this VM"); + logger.debug("Service Offering host tag does not match the last host of this VM"); } } else { - s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + + logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + host.getResourceState()); } } - s_logger.debug("Cannot choose the last host to deploy this VM "); + logger.debug("Cannot choose the last host to deploy this VM "); } DeployDestination dest = null; @@ -648,7 +646,7 @@ StateListener { 
if (hostResourceType == resourceUsageRequired) { return true; } else { - s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + + logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + hostResourceType); return false; } @@ -661,7 +659,7 @@ StateListener { public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { - s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + logger.error("Unable to lock the host entry for reservation, host: " + hostId); return false; } // check before updating @@ -674,7 +672,7 @@ StateListener { if (lockedEntry.getResourceUsage() == resourceUsageRequired) { return true; } else { - s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + + logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + hostResourceTypeFinal); return false; } @@ -699,8 +697,8 @@ StateListener { // check if any VMs are starting or running on this host List vms = _vmInstanceDao.listUpByHostId(hostId); if (vms.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + hostId); } return false; } @@ -713,8 +711,8 @@ StateListener { for (VMInstanceVO stoppedVM : vmsByLastHostId) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Cannot release reservation, Found VM: " + stoppedVM + " Stopped but reserved on host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot release reservation, Found VM: " + stoppedVM + " Stopped but reserved on host " + hostId); } return false; } @@ -724,8 +722,8 @@ StateListener { // check if any VMs are stopping on or migrating to this host List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, State.Stopping, State.Migrating, State.Starting); if (vmsStoppingMigratingByHostId.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vmsStoppingMigratingByHostId.size() + " VMs stopping/migrating/starting on host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot release reservation, Found " + vmsStoppingMigratingByHostId.size() + " VMs stopping/migrating/starting on host " + hostId); } return false; } @@ -736,14 +734,14 @@ StateListener { List vmsStartingNoHost = _vmInstanceDao.listStartingWithNoHostId(); if (vmsStartingNoHost.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs starting as of now and no hostId yet stored"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot release reservation, Found " + vms.size() + " VMs starting as of now and no hostId yet stored"); } return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); } final long id = reservationEntry.getId(); @@ -753,7 +751,7 @@ StateListener { public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { - s_logger.error("Unable to lock the host entry for reservation, host: " 
+ hostId); + logger.error("Unable to lock the host entry for reservation, host: " + hostId); return false; } // check before updating @@ -776,11 +774,11 @@ StateListener { @Override protected void runInContext() { try { - s_logger.debug("Checking if any host reservation can be released ... "); + logger.debug("Checking if any host reservation can be released ... "); checkHostReservations(); - s_logger.debug("Done running HostReservationReleaseChecker ... "); + logger.debug("Done running HostReservationReleaseChecker ... "); } catch (Throwable t) { - s_logger.error("Exception in HostReservationReleaseChecker", t); + logger.error("Exception in HostReservationReleaseChecker", t); } } } @@ -862,7 +860,7 @@ StateListener { @Override public void onPublishMessage(String senderAddress, String subject, Object obj) { VMInstanceVO vm = ((VMInstanceVO)obj); - s_logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + + logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + ", checking if host reservation can be released for host:" + vm.getLastHostId()); Long hostId = vm.getLastHostId(); checkHostReservationRelease(hostId); @@ -922,20 +920,20 @@ StateListener { private DeployDestination checkClustersforDestination(List clusterList, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, DataCenter dc, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList plannerAvoidOutput) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List to consider: " + clusterList); + if (logger.isTraceEnabled()) { + logger.trace("ClusterId List to consider: " + clusterList); } for (Long clusterId : clusterList) { ClusterVO clusterVO = _clusterDao.findById(clusterId); if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { - s_logger.debug("Cluster: " + clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); + 
logger.debug("Cluster: " + clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); avoid.addCluster(clusterVO.getId()); continue; } - s_logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); + logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); // search for resources(hosts and storage) under this zone, pod, // cluster. DataCenterDeployment potentialPlan = @@ -974,21 +972,21 @@ StateListener { storageVolMap.remove(vol); } DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } } else { - s_logger.debug("No suitable storagePools found under this Cluster: " + clusterId); + logger.debug("No suitable storagePools found under this Cluster: " + clusterId); } } else { - s_logger.debug("No suitable hosts found under this Cluster: " + clusterId); + logger.debug("No suitable hosts found under this Cluster: " + clusterId); } if (canAvoidCluster(clusterVO, avoid, plannerAvoidOutput, vmProfile)) { avoid.addCluster(clusterVO.getId()); } } - s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. "); + logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. 
"); return null; } @@ -1100,7 +1098,7 @@ StateListener { protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools, ExcludeList avoid, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, List readyAndReusedVolumes) { - s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); + logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); boolean hostCanAccessPool = false; boolean haveEnoughSpace = false; @@ -1124,7 +1122,7 @@ StateListener { Map> volumeAllocationMap = new HashMap>(); for (Volume vol : volumesOrderBySizeDesc) { haveEnoughSpace = false; - s_logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType()); + logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType()); List volumePoolList = suitableVolumeStoragePools.get(vol); hostCanAccessPool = false; for (StoragePool potentialSPool : volumePoolList) { @@ -1151,19 +1149,19 @@ StateListener { break; } if (!haveEnoughSpace) { - s_logger.warn("insufficient capacity to allocate all volumes"); + logger.warn("insufficient capacity to allocate all volumes"); break; } } if (hostCanAccessPool && haveEnoughSpace && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) { - s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + + logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM"); return new Pair>(potentialHost, storage); } else { avoid.addHost(potentialHost.getId()); } } - s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); + 
logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); return null; } @@ -1175,7 +1173,7 @@ StateListener { hostCanAccessSPool = true; } - s_logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + pool.getId()); + logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + pool.getId()); return hostCanAccessSPool; } @@ -1189,7 +1187,7 @@ StateListener { } if (suitableHosts.isEmpty()) { - s_logger.debug("No suitable hosts found"); + logger.debug("No suitable hosts found"); } return suitableHosts; } @@ -1219,14 +1217,14 @@ StateListener { Set poolsToAvoidOutput = new HashSet(originalAvoidPoolSet); for (VolumeVO toBeCreated : volumesTobeCreated) { - s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")"); + logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")"); // If the plan specifies a poolId, it means that this VM's ROOT // volume is ready and the pool should be reused. // In this case, also check if rest of the volumes are ready and can // be reused. 
if (plan.getPoolId() != null || (toBeCreated.getVolumeType() == Volume.Type.DATADISK && toBeCreated.getPoolId() != null && toBeCreated.getState() == Volume.State.Ready)) { - s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId()); + logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId()); List suitablePools = new ArrayList(); StoragePool pool = null; if (toBeCreated.getPoolId() != null) { @@ -1249,12 +1247,12 @@ StateListener { canReusePool = true; } } else { - s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); + logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); canReusePool = false; } if (canReusePool) { - s_logger.debug("Planner need not allocate a pool for this volume since its READY"); + logger.debug("Planner need not allocate a pool for this volume since its READY"); suitablePools.add(pool); suitableVolumeStoragePools.put(toBeCreated, suitablePools); if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { @@ -1263,21 +1261,21 @@ StateListener { continue; } } else { - s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); + logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); } } else { - s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); + logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("We need to allocate new storagepool for this volume"); + if (logger.isDebugEnabled()) { + logger.debug("We need to allocate new storagepool for this volume"); } if (!isRootAdmin(vmProfile)) { if (!isEnabledForAllocation(plan.getDataCenterId(), 
plan.getPodId(), plan.getClusterId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); - s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); + if (logger.isDebugEnabled()) { + logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); + logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); } // Cannot find suitable storage pools under this cluster for // this volume since allocation_state is disabled. @@ -1289,7 +1287,7 @@ StateListener { } } - s_logger.debug("Calling StoragePoolAllocators to find suitable pools"); + logger.debug("Calling StoragePoolAllocators to find suitable pools"); DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); @@ -1305,7 +1303,7 @@ StateListener { Boolean useLocalStorageForSystemVM = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(zone.getId()); if (useLocalStorageForSystemVM != null) { useLocalStorage = useLocalStorageForSystemVM.booleanValue(); - s_logger.debug("System VMs will use " + (useLocalStorage ? "local" : "shared") + " storage for zone id=" + plan.getDataCenterId()); + logger.debug("System VMs will use " + (useLocalStorage ? "local" : "shared") + " storage for zone id=" + plan.getDataCenterId()); } } else { useLocalStorage = diskOffering.getUseLocalStorage(); @@ -1341,7 +1339,7 @@ StateListener { } if (!foundPotentialPools) { - s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId()); + logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId()); // No suitable storage pools found under this cluster for this // volume. - remove any suitable pools found for other volumes. 
// All volumes should get suitable pools under this cluster; @@ -1364,7 +1362,7 @@ StateListener { } if (suitableVolumeStoragePools.isEmpty()) { - s_logger.debug("No suitable pools found"); + logger.debug("No suitable pools found"); } return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); @@ -1374,19 +1372,19 @@ StateListener { // Check if the zone exists in the system DataCenterVO zone = _dcDao.findById(zoneId); if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) { - s_logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); + logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); return false; } Pod pod = _podDao.findById(podId); if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) { - s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); + logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); return false; } Cluster cluster = _clusterDao.findById(clusterId); if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) { - s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); + logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); return false; } diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index 8fa25bd5960..1be85b0c5f5 100644 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.log4j.Logger; import com.cloud.capacity.Capacity; import 
com.cloud.capacity.CapacityManager; @@ -70,7 +69,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = DeploymentPlanner.class) public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPlanner, Configurable { - private static final Logger s_logger = Logger.getLogger(FirstFitPlanner.class); @Inject protected HostDao _hostDao; @Inject @@ -127,8 +125,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla //check if datacenter is in avoid set if (avoid.shouldAvoid(dc)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + if (logger.isDebugEnabled()) { + logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); } return null; } @@ -136,29 +134,29 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla List clusterList = new ArrayList(); if (plan.getClusterId() != null) { Long clusterIdSpecified = plan.getClusterId(); - s_logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified); + logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified); ClusterVO cluster = _clusterDao.findById(plan.getClusterId()); if (cluster != null) { if (avoid.shouldAvoid(cluster)) { - s_logger.debug("The specified cluster is in avoid set, returning."); + logger.debug("The specified cluster is in avoid set, returning."); } else { clusterList.add(clusterIdSpecified); removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan); } } else { - s_logger.debug("The specified cluster cannot be found, returning."); + logger.debug("The specified cluster cannot be found, returning."); avoid.addCluster(plan.getClusterId()); return null; } } else if (plan.getPodId() != null) { //consider clusters under this pod only Long podIdSpecified = plan.getPodId(); - 
s_logger.debug("Searching resources only under specified Pod: " + podIdSpecified); + logger.debug("Searching resources only under specified Pod: " + podIdSpecified); HostPodVO pod = _podDao.findById(podIdSpecified); if (pod != null) { if (avoid.shouldAvoid(pod)) { - s_logger.debug("The specified pod is in avoid set, returning."); + logger.debug("The specified pod is in avoid set, returning."); } else { clusterList = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid); if (clusterList == null) { @@ -166,12 +164,12 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla } } } else { - s_logger.debug("The specified Pod cannot be found, returning."); + logger.debug("The specified Pod cannot be found, returning."); avoid.addPod(plan.getPodId()); return null; } } else { - s_logger.debug("Searching all possible resources under this Zone: " + plan.getDataCenterId()); + logger.debug("Searching all possible resources under this Zone: " + plan.getDataCenterId()); boolean applyAllocationAtPods = Boolean.parseBoolean(_configDao.getValue(Config.ApplyAllocationAlgorithmToPods.key())); if (applyAllocationAtPods) { @@ -228,23 +226,23 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla if (!podsWithCapacity.isEmpty()) { if (avoid.getPodsToAvoid() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the podId list these pods from avoid set: " + avoid.getPodsToAvoid()); + if (logger.isDebugEnabled()) { + logger.debug("Removing from the podId list these pods from avoid set: " + avoid.getPodsToAvoid()); } podsWithCapacity.removeAll(avoid.getPodsToAvoid()); } if (!isRootAdmin(vmProfile)) { List disabledPods = listDisabledPods(plan.getDataCenterId()); if (!disabledPods.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the podId list these pods that are disabled: " + disabledPods); + if (logger.isDebugEnabled()) { + logger.debug("Removing from 
the podId list these pods that are disabled: " + disabledPods); } podsWithCapacity.removeAll(disabledPods); } } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No pods found having a host with enough capacity, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No pods found having a host with enough capacity, returning."); } return null; } @@ -253,8 +251,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan); if (prioritizedPodIds == null || prioritizedPodIds.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No Pods found for destination, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No Pods found for destination, returning."); } return null; } @@ -262,7 +260,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla List clusterList = new ArrayList(); //loop over pods for (Long podId : prioritizedPodIds) { - s_logger.debug("Checking resources under Pod: " + podId); + logger.debug("Checking resources under Pod: " + podId); List clustersUnderPod = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid); if (clustersUnderPod != null) { clusterList.addAll(clustersUnderPod); @@ -270,8 +268,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla } return clusterList; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning."); } return null; } @@ -338,7 +336,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla // Remove clusters crossing disabled threshold clusterListForVmAllocation.removeAll(clustersCrossingThreshold); - s_logger.debug("Cannot allocate cluster list " + 
clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + + logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters"); } @@ -358,8 +356,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla List prioritizedClusterIds = clusterCapacityInfo.first(); if (!prioritizedClusterIds.isEmpty()) { if (avoid.getClustersToAvoid() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the clusterId list these clusters from avoid set: " + avoid.getClustersToAvoid()); + if (logger.isDebugEnabled()) { + logger.debug("Removing from the clusterId list these clusters from avoid set: " + avoid.getClustersToAvoid()); } prioritizedClusterIds.removeAll(avoid.getClustersToAvoid()); } @@ -372,8 +370,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla disabledClusters = listDisabledClusters(plan.getDataCenterId(), id); } if (!disabledClusters.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the clusterId list these clusters that are disabled/clusters under disabled pods: " + disabledClusters); + if (logger.isDebugEnabled()) { + logger.debug("Removing from the clusterId list these clusters that are disabled/clusters under disabled pods: " + disabledClusters); } prioritizedClusterIds.removeAll(disabledClusters); } @@ -382,8 +380,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla removeClustersCrossingThreshold(prioritizedClusterIds, avoid, vmProfile, plan); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No clusters found having a host with enough capacity, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No clusters found having a host with enough 
capacity, returning."); } return null; } @@ -391,8 +389,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla List clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan); return clusterList; //return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning."); } return null; } @@ -443,8 +441,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla //although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot //we need clusters having enough cpu AND RAM to host this particular VM and order them by aggregate cluster capacity - if (s_logger.isDebugEnabled()) { - s_logger.debug("Listing clusters in order of aggregate capacity, that have (atleast one host with) enough CPU and RAM capacity under this " + + if (logger.isDebugEnabled()) { + logger.debug("Listing clusters in order of aggregate capacity, that have (atleast one host with) enough CPU and RAM capacity under this " + (isZone ? 
"Zone: " : "Pod: ") + id); } String capacityTypeToOrder = _configDao.getValue(Config.HostCapacityTypeToOrderClusters.key()); @@ -454,19 +452,19 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla } List clusterIdswithEnoughCapacity = _capacityDao.listClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone); - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity); + if (logger.isTraceEnabled()) { + logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity); } Pair, Map> result = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone); List clusterIdsOrderedByAggregateCapacity = result.first(); //only keep the clusters that have enough capacity to host this VM - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); + if (logger.isTraceEnabled()) { + logger.trace("ClusterId List in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); } clusterIdsOrderedByAggregateCapacity.retainAll(clusterIdswithEnoughCapacity); - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List having enough CPU and RAM capacity & in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); + if (logger.isTraceEnabled()) { + logger.trace("ClusterId List having enough CPU and RAM capacity & in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); } return result; @@ -478,8 +476,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla //although an aggregate value may be false indicator that a pod can host a vm, it will at the least eliminate those pods which definitely cannot //we need pods having enough cpu AND RAM to host this particular VM and order them by aggregate pod capacity - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Listing pods in order of aggregate capacity, that have (atleast one host with) enough CPU and RAM capacity under this Zone: " + zoneId); + if (logger.isDebugEnabled()) { + logger.debug("Listing pods in order of aggregate capacity, that have (atleast one host with) enough CPU and RAM capacity under this Zone: " + zoneId); } String capacityTypeToOrder = _configDao.getValue(Config.HostCapacityTypeToOrderClusters.key()); short capacityType = Capacity.CAPACITY_TYPE_CPU; @@ -488,19 +486,19 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla } List podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType); - if (s_logger.isTraceEnabled()) { - s_logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity); + if (logger.isTraceEnabled()) { + logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity); } Pair, Map> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType); List podIdsOrderedByAggregateCapacity = result.first(); //only keep the clusters that have enough capacity to host this VM - if (s_logger.isTraceEnabled()) { - s_logger.trace("PodId List in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); + if (logger.isTraceEnabled()) { + logger.trace("PodId List in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); } podIdsOrderedByAggregateCapacity.retainAll(podIdswithEnoughCapacity); - if (s_logger.isTraceEnabled()) { - s_logger.trace("PodId List having enough CPU and RAM capacity & in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); + if (logger.isTraceEnabled()) { + logger.trace("PodId List having enough CPU and RAM capacity & in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); } return result; diff --git a/server/src/com/cloud/event/dao/EventJoinDaoImpl.java b/server/src/com/cloud/event/dao/EventJoinDaoImpl.java index 
313c3725148..8a1578bdbea 100644 --- a/server/src/com/cloud/event/dao/EventJoinDaoImpl.java +++ b/server/src/com/cloud/event/dao/EventJoinDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.EventResponse; @@ -37,7 +36,6 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = {EventJoinDao.class}) public class EventJoinDaoImpl extends GenericDaoBase implements EventJoinDao { - public static final Logger s_logger = Logger.getLogger(EventJoinDaoImpl.class); private SearchBuilder vrSearch; diff --git a/server/src/com/cloud/ha/AbstractInvestigatorImpl.java b/server/src/com/cloud/ha/AbstractInvestigatorImpl.java index 147cecdc640..b65865e732b 100644 --- a/server/src/com/cloud/ha/AbstractInvestigatorImpl.java +++ b/server/src/com/cloud/ha/AbstractInvestigatorImpl.java @@ -23,7 +23,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -40,7 +39,6 @@ import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; public abstract class AbstractInvestigatorImpl extends AdapterBase implements Investigator { - private static final Logger s_logger = Logger.getLogger(AbstractInvestigatorImpl.class); @Inject private final HostDao _hostDao = null; @@ -90,32 +88,32 @@ public abstract class AbstractInvestigatorImpl extends AdapterBase implements In try { Answer pingTestAnswer = _agentMgr.send(hostId, new PingTestCommand(testHostIp)); if (pingTestAnswer == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + ") returns Unknown (null) answer"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + ") returns Unknown (null) answer"); } return Status.Unknown; } if (pingTestAnswer.getResult()) { - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + ") has been successfully pinged, returning that host is up"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + ") has been successfully pinged, returning that host is up"); } // computing host is available, but could not reach agent, return false return Status.Up; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + ") cannot be pinged, returning Unknown (I don't know) state"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + ") cannot be pinged, returning Unknown (I don't know) state"); } return Status.Unknown; } } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + "): " + e.getLocalizedMessage() + ", trapped AgentUnavailableException returning Unknown state"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + "): " + e.getLocalizedMessage() + ", trapped AgentUnavailableException returning Unknown state"); } return Status.Unknown; } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + "): " + e.getLocalizedMessage() + ", trapped OperationTimedoutException returning Unknown state"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + "): " + e.getLocalizedMessage() + ", trapped OperationTimedoutException returning Unknown state"); } return Status.Unknown; } diff --git a/server/src/com/cloud/ha/CheckOnAgentInvestigator.java b/server/src/com/cloud/ha/CheckOnAgentInvestigator.java index b2e333fbf92..3a7fdf4f5d4 100644 --- a/server/src/com/cloud/ha/CheckOnAgentInvestigator.java +++ b/server/src/com/cloud/ha/CheckOnAgentInvestigator.java @@ -19,7 +19,6 @@ package com.cloud.ha; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.CheckVirtualMachineAnswer; @@ -34,7 
+33,6 @@ import com.cloud.vm.VirtualMachine.PowerState; @Local(value = Investigator.class) public class CheckOnAgentInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(CheckOnAgentInvestigator.class); @Inject AgentManager _agentMgr; @@ -52,17 +50,17 @@ public class CheckOnAgentInvestigator extends AdapterBase implements Investigato try { CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer)_agentMgr.send(vm.getHostId(), cmd); if (!answer.getResult()) { - s_logger.debug("Unable to get vm state on " + vm.toString()); + logger.debug("Unable to get vm state on " + vm.toString()); throw new UnknownVM(); } - s_logger.debug("Agent responded with state " + answer.getState().toString()); + logger.debug("Agent responded with state " + answer.getState().toString()); return answer.getState() == PowerState.PowerOn; } catch (AgentUnavailableException e) { - s_logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage()); + logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage()); throw new UnknownVM(); } catch (OperationTimedoutException e) { - s_logger.debug("Operation timed out for " + vm.toString() + ": " + e.getMessage()); + logger.debug("Operation timed out for " + vm.toString() + ": " + e.getMessage()); throw new UnknownVM(); } } diff --git a/server/src/com/cloud/ha/HighAvailabilityManagerExtImpl.java b/server/src/com/cloud/ha/HighAvailabilityManagerExtImpl.java index 56db8ef2f26..b2f8a962766 100644 --- a/server/src/com/cloud/ha/HighAvailabilityManagerExtImpl.java +++ b/server/src/com/cloud/ha/HighAvailabilityManagerExtImpl.java @@ -64,8 +64,8 @@ public class HighAvailabilityManagerExtImpl extends HighAvailabilityManagerImpl protected class UsageServerMonitorTask extends ManagedContextRunnable { @Override protected void runInContext() { - if (s_logger.isInfoEnabled()) { - s_logger.info("checking health of usage server"); + if 
(logger.isInfoEnabled()) { + logger.info("checking health of usage server"); } try { @@ -80,8 +80,8 @@ public class HighAvailabilityManagerExtImpl extends HighAvailabilityManagerImpl isRunning = true; } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("usage server running? " + isRunning + ", heartbeat: " + lastHeartbeat); + if (logger.isDebugEnabled()) { + logger.debug("usage server running? " + isRunning + ", heartbeat: " + lastHeartbeat); } } finally { txn.close(); @@ -98,7 +98,7 @@ public class HighAvailabilityManagerExtImpl extends HighAvailabilityManagerImpl _alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER, 0, 0); } } catch (Exception ex) { - s_logger.warn("Error while monitoring usage job", ex); + logger.warn("Error while monitoring usage job", ex); } } } diff --git a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java index bec1fd58cc2..880a4003eac 100644 --- a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -29,7 +29,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.log4j.NDC; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -104,7 +103,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = { HighAvailabilityManager.class }) public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvailabilityManager, ClusterManagerListener { - protected static final Logger s_logger = Logger.getLogger(HighAvailabilityManagerImpl.class); WorkerThread[] _workers; boolean _stopped; long _timeToSleep; @@ -209,13 +207,13 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai for (Investigator investigator : investigators) { hostState = 
investigator.isAgentAlive(host); if (hostState != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(investigator.getName() + " was able to determine host " + hostId + " is in " + hostState.toString()); + if (logger.isDebugEnabled()) { + logger.debug(investigator.getName() + " was able to determine host " + hostId + " is in " + hostState.toString()); } return hostState; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(investigator.getName() + " unable to determine the state of the host. Moving on."); + if (logger.isDebugEnabled()) { + logger.debug(investigator.getName() + " unable to determine the state of the host. Moving on."); } } @@ -230,11 +228,11 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai } if (host.getHypervisorType() == HypervisorType.VMware || host.getHypervisorType() == HypervisorType.Hyperv) { - s_logger.info("Don't restart VMs on host " + host.getId() + " as it is a " + host.getHypervisorType().toString() + " host"); + logger.info("Don't restart VMs on host " + host.getId() + " as it is a " + host.getHypervisorType().toString() + " host"); return; } - s_logger.warn("Scheduling restart for VMs on host " + host.getId() + "-" + host.getName()); + logger.warn("Scheduling restart for VMs on host " + host.getId() + "-" + host.getName()); final List vms = _instanceDao.listByHostId(host.getId()); final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); @@ -266,13 +264,13 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai "Host [" + hostDesc + "] is down." + ((sb != null) ? 
sb.toString() : "")); for (VMInstanceVO vm : reorderedVMList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying HA Mgr of to restart vm " + vm.getId() + "-" + vm.getInstanceName()); + if (logger.isDebugEnabled()) { + logger.debug("Notifying HA Mgr of to restart vm " + vm.getId() + "-" + vm.getInstanceName()); } vm = _instanceDao.findByUuid(vm.getUuid()); Long hostId = vm.getHostId(); if (hostId != null && !hostId.equals(host.getId())) { - s_logger.debug("VM " + vm.getInstanceName() + " is not on down host " + host.getId() + " it is on other host " + logger.debug("VM " + vm.getInstanceName() + " is not on down host " + host.getId() + " it is on other host " + hostId + " VM HA is done"); continue; } @@ -285,14 +283,14 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai assert (type == WorkType.CheckStop || type == WorkType.ForceStop || type == WorkType.Stop); if (_haDao.hasBeenScheduled(vm.getId(), type)) { - s_logger.info("There's already a job scheduled to stop " + vm); + logger.info("There's already a job scheduled to stop " + vm); return; } HaWorkVO work = new HaWorkVO(vm.getId(), vm.getType(), type, Step.Scheduled, hostId, vm.getState(), 0, vm.getUpdated()); _haDao.persist(work); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduled " + work); + if (logger.isDebugEnabled()) { + logger.debug("Scheduled " + work); } wakeupWorkers(); } @@ -318,7 +316,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai Long hostId = vm.getHostId(); if (hostId == null) { try { - s_logger.debug("Found a vm that is scheduled to be restarted but has no host id: " + vm); + logger.debug("Found a vm that is scheduled to be restarted but has no host id: " + vm); _itMgr.advanceStop(vm.getUuid(), true); } catch (ResourceUnavailableException e) { assert false : "How do we hit this when force is true?"; @@ -333,13 +331,13 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai } 
if (vm.getHypervisorType() == HypervisorType.VMware || vm.getHypervisorType() == HypervisorType.Hyperv) { - s_logger.info("Skip HA for VMware VM or Hyperv VM" + vm.getInstanceName()); + logger.info("Skip HA for VMware VM or Hyperv VM" + vm.getInstanceName()); return; } if (!investigate) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM does not require investigation so I'm marking it as Stopped: " + vm.toString()); + if (logger.isDebugEnabled()) { + logger.debug("VM does not require investigation so I'm marking it as Stopped: " + vm.toString()); } AlertManager.AlertType alertType = AlertManager.AlertType.ALERT_TYPE_USERVM; @@ -357,8 +355,8 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai ") stopped unexpectedly on host " + hostDesc, "Virtual Machine " + vm.getHostName() + " (id: " + vm.getId() + ") running on host [" + vm.getHostId() + "] stopped unexpectedly."); - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not HA enabled so we're done."); + if (logger.isDebugEnabled()) { + logger.debug("VM is not HA enabled so we're done."); } } @@ -378,7 +376,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai } if (vm.getHypervisorType() == HypervisorType.VMware) { - s_logger.info("Skip HA for VMware VM " + vm.getInstanceName()); + logger.info("Skip HA for VMware VM " + vm.getInstanceName()); return; } @@ -399,8 +397,8 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai hostId != null ? 
hostId : 0L, vm.getState(), maxRetries + 1, vm.getUpdated()); _haDao.persist(work); - if (s_logger.isInfoEnabled()) { - s_logger.info("Schedule vm for HA: " + vm); + if (logger.isInfoEnabled()) { + logger.info("Schedule vm for HA: " + vm); } wakeupWorkers(); @@ -415,7 +413,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai str.append(item.getId()).append(", "); } str.delete(str.length() - 2, str.length()).append("]"); - s_logger.info(str.toString()); + logger.info(str.toString()); return null; } @@ -426,7 +424,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai str.append(item.getId()).append(", "); } str.delete(str.length() - 2, str.length()).append("]"); - s_logger.info(str.toString()); + logger.info(str.toString()); return (System.currentTimeMillis() >> 10) + _investigateRetryInterval; } @@ -434,13 +432,13 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai VirtualMachine vm = _itMgr.findById(work.getInstanceId()); if (vm == null) { - s_logger.info("Unable to find vm: " + vmId); + logger.info("Unable to find vm: " + vmId); return null; } - s_logger.info("HA on " + vm); + logger.info("HA on " + vm); if (vm.getState() != work.getPreviousState() || vm.getUpdated() != work.getUpdateTime()) { - s_logger.info("VM " + vm + " has been changed. Current State = " + vm.getState() + " Previous State = " + work.getPreviousState() + " last updated = " + + logger.info("VM " + vm + " has been changed. 
Current State = " + vm.getState() + " Previous State = " + work.getPreviousState() + " last updated = " + vm.getUpdated() + " previous updated = " + work.getUpdateTime()); return null; } @@ -459,7 +457,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai if (host == null) { host = _hostDao.findByIdIncludingRemoved(work.getHostId()); if (host != null) { - s_logger.debug("VM " + vm.toString() + " is now no longer on host " + work.getHostId() + " as the host is removed"); + logger.debug("VM " + vm.toString() + " is now no longer on host " + work.getHostId() + " as the host is removed"); isHostRemoved = true; } } @@ -472,7 +470,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai if (work.getStep() == Step.Investigating) { if (!isHostRemoved) { if (vm.getHostId() == null || vm.getHostId() != work.getHostId()) { - s_logger.info("VM " + vm.toString() + " is now no longer on host " + work.getHostId()); + logger.info("VM " + vm.toString() + " is now no longer on host " + work.getHostId()); return null; } @@ -482,19 +480,19 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai try { alive = investigator.isVmAlive(vm, host); - s_logger.info(investigator.getName() + " found " + vm + " to be alive? " + alive); + logger.info(investigator.getName() + " found " + vm + " to be alive? 
" + alive); break; } catch (UnknownVM e) { - s_logger.info(investigator.getName() + " could not find " + vm); + logger.info(investigator.getName() + " could not find " + vm); } } boolean fenced = false; if (alive == null) { - s_logger.debug("Fencing off VM that we don't know the state of"); + logger.debug("Fencing off VM that we don't know the state of"); for (FenceBuilder fb : fenceBuilders) { Boolean result = fb.fenceOff(vm, host); - s_logger.info("Fencer " + fb.getName() + " returned " + result); + logger.info("Fencer " + fb.getName() + " returned " + result); if (result != null && result) { fenced = true; break; @@ -504,18 +502,18 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai } else if (!alive) { fenced = true; } else { - s_logger.debug("VM " + vm.getInstanceName() + " is found to be alive by " + investigator.getName()); + logger.debug("VM " + vm.getInstanceName() + " is found to be alive by " + investigator.getName()); if (host.getStatus() == Status.Up) { - s_logger.info(vm + " is alive and host is up. No need to restart it."); + logger.info(vm + " is alive and host is up. 
No need to restart it."); return null; } else { - s_logger.debug("Rescheduling because the host is not up but the vm is alive"); + logger.debug("Rescheduling because the host is not up but the vm is alive"); return (System.currentTimeMillis() >> 10) + _investigateRetryInterval; } } if (!fenced) { - s_logger.debug("We were unable to fence off the VM " + vm); + logger.debug("We were unable to fence off the VM " + vm); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "Insufficient capacity to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); @@ -538,7 +536,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai work.setStep(Step.Scheduled); _haDao.update(work.getId(), work); } else { - s_logger.debug("How come that HA step is Investigating and the host is removed? Calling forced Stop on Vm anyways"); + logger.debug("How come that HA step is Investigating and the host is removed? 
Calling forced Stop on Vm anyways"); try { _itMgr.advanceStop(vm.getUuid(), true); } catch (ResourceUnavailableException e) { @@ -557,22 +555,22 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai vm = _itMgr.findById(vm.getId()); if (!_forceHA && !vm.isHaEnabled()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not HA enabled so we're done."); + if (logger.isDebugEnabled()) { + logger.debug("VM is not HA enabled so we're done."); } return null; // VM doesn't require HA } if ((host == null || host.getRemoved() != null || host.getState() != Status.Up) && !volumeMgr.canVmRestartOnAnotherServer(vm.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM can not restart on another server."); + if (logger.isDebugEnabled()) { + logger.debug("VM can not restart on another server."); } return null; } if (work.getTimesTried() > _maxRetries) { - s_logger.warn("Retried to max times so deleting: " + vmId); + logger.warn("Retried to max times so deleting: " + vmId); return null; } @@ -590,33 +588,33 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai // First try starting the vm with its original planner, if it doesn't succeed send HAPlanner as its an emergency. 
_itMgr.advanceStart(vm.getUuid(), params, null); }catch (InsufficientCapacityException e){ - s_logger.warn("Failed to deploy vm " + vmId + " with original planner, sending HAPlanner"); + logger.warn("Failed to deploy vm " + vmId + " with original planner, sending HAPlanner"); _itMgr.advanceStart(vm.getUuid(), params, _haPlanners.get(0)); } VMInstanceVO started = _instanceDao.findById(vm.getId()); if (started != null && started.getState() == VirtualMachine.State.Running) { - s_logger.info("VM is now restarted: " + vmId + " on " + started.getHostId()); + logger.info("VM is now restarted: " + vmId + " on " + started.getHostId()); return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Rescheduling VM " + vm.toString() + " to try again in " + _restartRetryInterval); + if (logger.isDebugEnabled()) { + logger.debug("Rescheduling VM " + vm.toString() + " to try again in " + _restartRetryInterval); } } catch (final InsufficientCapacityException e) { - s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); + logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "Insufficient capacity to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } catch (final ResourceUnavailableException e) { - s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); + logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } catch (ConcurrentOperationException e) { - 
s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); + logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } catch (OperationTimedoutException e) { - s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); + logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } @@ -642,7 +640,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai _itMgr.migrateAway(vm.getUuid(), srcHostId); return null; } catch (InsufficientServerCapacityException e) { - s_logger.warn("Insufficient capacity for migrating a VM."); + logger.warn("Insufficient capacity for migrating a VM."); _resourceMgr.maintenanceFailed(srcHostId); return (System.currentTimeMillis() >> 10) + _migrateRetryInterval; } @@ -652,8 +650,8 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai public void scheduleDestroy(VMInstanceVO vm, long hostId) { final HaWorkVO work = new HaWorkVO(vm.getId(), vm.getType(), WorkType.Destroy, Step.Scheduled, hostId, vm.getState(), 0, vm.getUpdated()); _haDao.persist(work); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduled " + work.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Scheduled " + work.toString()); } wakeupWorkers(); } @@ -665,29 +663,29 @@ public class 
HighAvailabilityManagerImpl extends ManagerBase implements HighAvai protected Long destroyVM(HaWorkVO work) { final VirtualMachine vm = _itMgr.findById(work.getInstanceId()); - s_logger.info("Destroying " + vm.toString()); + logger.info("Destroying " + vm.toString()); try { if (vm.getState() != State.Destroyed) { - s_logger.info("VM is no longer in Destroyed state " + vm.toString()); + logger.info("VM is no longer in Destroyed state " + vm.toString()); return null; } if (vm.getHostId() != null) { _itMgr.destroy(vm.getUuid()); - s_logger.info("Successfully destroy " + vm); + logger.info("Successfully destroy " + vm); return null; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " has already been stopped"); + if (logger.isDebugEnabled()) { + logger.debug(vm + " has already been stopped"); } return null; } } catch (final AgentUnavailableException e) { - s_logger.debug("Agnet is not available" + e.getMessage()); + logger.debug("Agnet is not available" + e.getMessage()); } catch (OperationTimedoutException e) { - s_logger.debug("operation timed out: " + e.getMessage()); + logger.debug("operation timed out: " + e.getMessage()); } catch (ConcurrentOperationException e) { - s_logger.debug("concurrent operation: " + e.getMessage()); + logger.debug("concurrent operation: " + e.getMessage()); } work.setTimesTried(work.getTimesTried() + 1); @@ -697,50 +695,50 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai protected Long stopVM(final HaWorkVO work) throws ConcurrentOperationException { VirtualMachine vm = _itMgr.findById(work.getInstanceId()); if (vm == null) { - s_logger.info("No longer can find VM " + work.getInstanceId() + ". Throwing away " + work); + logger.info("No longer can find VM " + work.getInstanceId() + ". 
Throwing away " + work); work.setStep(Step.Done); return null; } - s_logger.info("Stopping " + vm); + logger.info("Stopping " + vm); try { if (work.getWorkType() == WorkType.Stop) { _itMgr.advanceStop(vm.getUuid(), false); - s_logger.info("Successfully stopped " + vm); + logger.info("Successfully stopped " + vm); return null; } else if (work.getWorkType() == WorkType.CheckStop) { if ((vm.getState() != work.getPreviousState()) || vm.getUpdated() != work.getUpdateTime() || vm.getHostId() == null || vm.getHostId().longValue() != work.getHostId()) { - s_logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + + logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + (vm.getHostId() != null ? vm.getHostId() : "none") + " State: " + vm.getState()); return null; } _itMgr.advanceStop(vm.getUuid(), false); - s_logger.info("Stop for " + vm + " was successful"); + logger.info("Stop for " + vm + " was successful"); return null; } else if (work.getWorkType() == WorkType.ForceStop) { if ((vm.getState() != work.getPreviousState()) || vm.getUpdated() != work.getUpdateTime() || vm.getHostId() == null || vm.getHostId().longValue() != work.getHostId()) { - s_logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + + logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + (vm.getHostId() != null ? 
vm.getHostId() : "none") + " State: " + vm.getState()); return null; } _itMgr.advanceStop(vm.getUuid(), true); - s_logger.info("Stop for " + vm + " was successful"); + logger.info("Stop for " + vm + " was successful"); return null; } else { assert false : "Who decided there's other steps but didn't modify the guy who does the work?"; } } catch (final ResourceUnavailableException e) { - s_logger.debug("Agnet is not available" + e.getMessage()); + logger.debug("Agnet is not available" + e.getMessage()); } catch (OperationTimedoutException e) { - s_logger.debug("operation timed out: " + e.getMessage()); + logger.debug("operation timed out: " + e.getMessage()); } work.setTimesTried(work.getTimesTried() + 1); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Stop was unsuccessful. Rescheduling"); + if (logger.isDebugEnabled()) { + logger.debug("Stop was unsuccessful. Rescheduling"); } return (System.currentTimeMillis() >> 10) + _stopRetryInterval; } @@ -849,12 +847,12 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai protected class CleanupTask extends ManagedContextRunnable { @Override protected void runInContext() { - s_logger.info("HA Cleanup Thread Running"); + logger.info("HA Cleanup Thread Running"); try { _haDao.cleanup(System.currentTimeMillis() - _timeBetweenFailures); } catch (Exception e) { - s_logger.warn("Error while cleaning up", e); + logger.warn("Error while cleaning up", e); } } } @@ -866,7 +864,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai @Override public void run() { - s_logger.info("Starting work"); + logger.info("Starting work"); while (!_stopped) { _managedContext.runWithContext(new Runnable() { @Override @@ -875,13 +873,13 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai } }); } - s_logger.info("Time to go home!"); + logger.info("Time to go home!"); } private void runWithContext() { HaWorkVO work = null; try { - s_logger.trace("Checking 
the database"); + logger.trace("Checking the database"); work = _haDao.take(_serverId); if (work == null) { try { @@ -890,13 +888,13 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai } return; } catch (final InterruptedException e) { - s_logger.info("Interrupted"); + logger.info("Interrupted"); return; } } NDC.push("work-" + work.getId()); - s_logger.info("Processing " + work); + logger.info("Processing " + work); try { final WorkType wt = work.getWorkType(); @@ -915,21 +913,21 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai } if (nextTime == null) { - s_logger.info("Completed " + work); + logger.info("Completed " + work); work.setStep(Step.Done); } else { - s_logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10)); + logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10)); work.setTimeToTry(nextTime); work.setTimesTried(work.getTimesTried() + 1); work.setServerId(null); work.setDateTaken(null); } } catch (Exception e) { - s_logger.warn("Encountered unhandled exception during HA process, reschedule retry", e); + logger.warn("Encountered unhandled exception during HA process, reschedule retry", e); long nextTime = (System.currentTimeMillis() >> 10) + _restartRetryInterval; - s_logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10)); + logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10)); work.setTimeToTry(nextTime); work.setTimesTried(work.getTimesTried() + 1); work.setServerId(null); @@ -941,13 +939,13 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai work.setUpdateTime(vm.getUpdated()); work.setPreviousState(vm.getState()); if (!Step.Done.equals(work.getStep()) && work.getTimesTried() >= _maxRetries) { - s_logger.warn("Giving up, retries max times for work: " + work); + logger.warn("Giving up, retries max times for work: " + work); 
work.setStep(Step.Done); } } _haDao.update(work.getId(), work); } catch (final Throwable th) { - s_logger.error("Caught this throwable, ", th); + logger.error("Caught this throwable, ", th); } finally { if (work != null) { NDC.pop(); diff --git a/server/src/com/cloud/ha/KVMFencer.java b/server/src/com/cloud/ha/KVMFencer.java index b5834efe91a..fb76d82b2fc 100644 --- a/server/src/com/cloud/ha/KVMFencer.java +++ b/server/src/com/cloud/ha/KVMFencer.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -42,7 +41,6 @@ import com.cloud.vm.VirtualMachine; @Local(value = FenceBuilder.class) public class KVMFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(KVMFencer.class); @Inject HostDao _hostDao; @@ -78,7 +76,7 @@ public class KVMFencer extends AdapterBase implements FenceBuilder { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.KVM && host.getHypervisorType() != HypervisorType.LXC) { - s_logger.warn("Don't know how to fence non kvm hosts " + host.getHypervisorType()); + logger.warn("Don't know how to fence non kvm hosts " + host.getHypervisorType()); return null; } @@ -101,10 +99,10 @@ public class KVMFencer extends AdapterBase implements FenceBuilder { try { answer = (FenceAnswer)_agentMgr.send(h.getId(), fence); } catch (AgentUnavailableException e) { - s_logger.info("Moving on to the next host because " + h.toString() + " is unavailable"); + logger.info("Moving on to the next host because " + h.toString() + " is unavailable"); continue; } catch (OperationTimedoutException e) { - s_logger.info("Moving on to the next host because " + h.toString() + " is unavailable"); + logger.info("Moving on to the next host because " + h.toString() + " is unavailable"); continue; } if 
(answer != null && answer.getResult()) { @@ -118,7 +116,7 @@ public class KVMFencer extends AdapterBase implements FenceBuilder { "Fencing off host " + host.getId() + " did not succeed after asking " + i + " hosts. " + "Check Agent logs for more information."); - s_logger.error("Unable to fence off " + vm.toString() + " on " + host.toString()); + logger.error("Unable to fence off " + vm.toString() + " on " + host.toString()); return false; } diff --git a/server/src/com/cloud/ha/ManagementIPSystemVMInvestigator.java b/server/src/com/cloud/ha/ManagementIPSystemVMInvestigator.java index 9f361b0de63..5835fbca4bd 100644 --- a/server/src/com/cloud/ha/ManagementIPSystemVMInvestigator.java +++ b/server/src/com/cloud/ha/ManagementIPSystemVMInvestigator.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -34,7 +33,6 @@ import com.cloud.vm.VirtualMachine; @Local(value = {Investigator.class}) public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { - private static final Logger s_logger = Logger.getLogger(ManagementIPSystemVMInvestigator.class); @Inject private final HostDao _hostDao = null; @@ -44,28 +42,28 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { @Override public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { if (!vm.getType().isUsedBySystem()) { - s_logger.debug("Not a System Vm, unable to determine state of " + vm + " returning null"); + logger.debug("Not a System Vm, unable to determine state of " + vm + " returning null"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Testing if " + vm + " is alive"); + if (logger.isDebugEnabled()) { + logger.debug("Testing if " + vm + " is alive"); } if (vm.getHostId() == null) { - s_logger.debug("There's no host id for " + vm); + logger.debug("There's no host id for " + vm); throw new UnknownVM(); } 
HostVO vmHost = _hostDao.findById(vm.getHostId()); if (vmHost == null) { - s_logger.debug("Unable to retrieve the host by using id " + vm.getHostId()); + logger.debug("Unable to retrieve the host by using id " + vm.getHostId()); throw new UnknownVM(); } List nics = _networkMgr.getNicsForTraffic(vm.getId(), TrafficType.Management); if (nics.size() == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find a management nic, cannot ping this system VM, unable to determine state of " + vm + " returning null"); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find a management nic, cannot ping this system VM, unable to determine state of " + vm + " returning null"); } throw new UnknownVM(); } @@ -81,8 +79,8 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { assert vmState != null; // In case of Status.Unknown, next host will be tried if (vmState == Status.Up) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("successfully pinged vm's private IP (" + vm.getPrivateIpAddress() + "), returning that the VM is up"); + if (logger.isDebugEnabled()) { + logger.debug("successfully pinged vm's private IP (" + vm.getPrivateIpAddress() + "), returning that the VM is up"); } return Boolean.TRUE; } else if (vmState == Status.Down) { @@ -91,8 +89,8 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { Status vmHostState = testIpAddress(otherHost, vmHost.getPrivateIpAddress()); assert vmHostState != null; if (vmHostState == Status.Up) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("successfully pinged vm's host IP (" + vmHost.getPrivateIpAddress() + + if (logger.isDebugEnabled()) { + logger.debug("successfully pinged vm's host IP (" + vmHost.getPrivateIpAddress() + "), but could not ping VM, returning that the VM is down"); } return Boolean.FALSE; @@ -101,8 +99,8 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { } } - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("unable to determine state of " + vm + " returning null"); + if (logger.isDebugEnabled()) { + logger.debug("unable to determine state of " + vm + " returning null"); } throw new UnknownVM(); } diff --git a/server/src/com/cloud/ha/RecreatableFencer.java b/server/src/com/cloud/ha/RecreatableFencer.java index 4aa74385dff..fbfc1a77b06 100644 --- a/server/src/com/cloud/ha/RecreatableFencer.java +++ b/server/src/com/cloud/ha/RecreatableFencer.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -35,7 +34,6 @@ import com.cloud.vm.VirtualMachine; @Component @Local(value = FenceBuilder.class) public class RecreatableFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(RecreatableFencer.class); @Inject VolumeDao _volsDao; @Inject @@ -49,22 +47,22 @@ public class RecreatableFencer extends AdapterBase implements FenceBuilder { public Boolean fenceOff(VirtualMachine vm, Host host) { VirtualMachine.Type type = vm.getType(); if (type != VirtualMachine.Type.ConsoleProxy && type != VirtualMachine.Type.DomainRouter && type != VirtualMachine.Type.SecondaryStorageVm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Don't know how to fence off " + type); + if (logger.isDebugEnabled()) { + logger.debug("Don't know how to fence off " + type); } return null; } List vols = _volsDao.findByInstance(vm.getId()); for (VolumeVO vol : vols) { if (!vol.isRecreatable()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off volumes that are not recreatable: " + vol); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off volumes that are not recreatable: " + vol); } return null; } if (vol.getPoolType().isShared()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off 
volumes that are shared: " + vol); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off volumes that are shared: " + vol); } return null; } diff --git a/server/src/com/cloud/ha/UserVmDomRInvestigator.java b/server/src/com/cloud/ha/UserVmDomRInvestigator.java index 63d9bf055c1..e747c0052e8 100644 --- a/server/src/com/cloud/ha/UserVmDomRInvestigator.java +++ b/server/src/com/cloud/ha/UserVmDomRInvestigator.java @@ -22,7 +22,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -41,7 +40,6 @@ import com.cloud.vm.dao.UserVmDao; @Local(value = {Investigator.class}) public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { - private static final Logger s_logger = Logger.getLogger(UserVmDomRInvestigator.class); @Inject private final UserVmDao _userVmDao = null; @@ -55,14 +53,14 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { @Override public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { if (vm.getType() != VirtualMachine.Type.User) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not a User Vm, unable to determine state of " + vm + " returning null"); + if (logger.isDebugEnabled()) { + logger.debug("Not a User Vm, unable to determine state of " + vm + " returning null"); } throw new UnknownVM(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("testing if " + vm + " is alive"); + if (logger.isDebugEnabled()) { + logger.debug("testing if " + vm + " is alive"); } // to verify that the VM is alive, we ask the domR (router) to ping the VM (private IP) UserVmVO userVm = _userVmDao.findById(vm.getId()); @@ -76,8 +74,8 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { List routers = _vnaMgr.getRoutersForNetwork(nic.getNetworkId()); if (routers == null || routers.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to 
find a router in network " + nic.getNetworkId() + " to ping " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find a router in network " + nic.getNetworkId() + " to ping " + vm); } continue; } @@ -97,16 +95,16 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { return result; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Returning null since we're unable to determine state of " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Returning null since we're unable to determine state of " + vm); } throw new UnknownVM(); } @Override public Status isAgentAlive(Host agent) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("checking if agent (" + agent.getId() + ") is alive"); + if (logger.isDebugEnabled()) { + logger.debug("checking if agent (" + agent.getId() + ") is alive"); } if (agent.getPodId() == null) { @@ -116,29 +114,29 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { List otherHosts = findHostByPod(agent.getPodId(), agent.getId()); for (Long hostId : otherHosts) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("sending ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ")"); + if (logger.isDebugEnabled()) { + logger.debug("sending ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ")"); } Status hostState = testIpAddress(hostId, agent.getPrivateIpAddress()); assert hostState != null; // In case of Status.Unknown, next host will be tried if (hostState == Status.Up) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + + if (logger.isDebugEnabled()) { + logger.debug("ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ") successful, returning that agent is disconnected"); } return Status.Disconnected; // the computing host ip is ping-able, but the computing agent is down, report 
that the agent is disconnected } else if (hostState == Status.Down) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("returning host state: " + hostState); + if (logger.isDebugEnabled()) { + logger.debug("returning host state: " + hostState); } return Status.Down; } } // could not reach agent, could not reach agent's host, unclear what the problem is but it'll require more investigation... - if (s_logger.isDebugEnabled()) { - s_logger.debug("could not reach agent, could not reach agent's host, returning that we don't have enough information"); + if (logger.isDebugEnabled()) { + logger.debug("could not reach agent, could not reach agent's host, returning that we don't have enough information"); } return null; } @@ -167,21 +165,21 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { try { Answer pingTestAnswer = _agentMgr.easySend(hostId, new PingTestCommand(routerPrivateIp, privateIp)); if (pingTestAnswer != null && pingTestAnswer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("user vm's " + vm.getHostName() + " ip address " + privateIp + " has been successfully pinged from the Virtual Router " + + if (logger.isDebugEnabled()) { + logger.debug("user vm's " + vm.getHostName() + " ip address " + privateIp + " has been successfully pinged from the Virtual Router " + router.getHostName() + ", returning that vm is alive"); } return Boolean.TRUE; } } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't reach due to", e); + if (logger.isDebugEnabled()) { + logger.debug("Couldn't reach due to", e); } continue; } } - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " could not be pinged, returning that it is unknown"); + if (logger.isDebugEnabled()) { + logger.debug(vm + " could not be pinged, returning that it is unknown"); } return null; diff --git a/server/src/com/cloud/ha/XenServerInvestigator.java b/server/src/com/cloud/ha/XenServerInvestigator.java index 07eb76be193..8cdb80c9c33 100644 
--- a/server/src/com/cloud/ha/XenServerInvestigator.java +++ b/server/src/com/cloud/ha/XenServerInvestigator.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -38,7 +37,6 @@ import com.cloud.vm.VirtualMachine; @Local(value = Investigator.class) public class XenServerInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(XenServerInvestigator.class); @Inject HostDao _hostDao; @Inject @@ -65,7 +63,7 @@ public class XenServerInvestigator extends AdapterBase implements Investigator { if (answer != null && answer.getResult()) { CheckOnHostAnswer ans = (CheckOnHostAnswer)answer; if (!ans.isDetermined()) { - s_logger.debug("Host " + neighbor + " couldn't determine the status of " + agent); + logger.debug("Host " + neighbor + " couldn't determine the status of " + agent); continue; } // even it returns true, that means host is up, but XAPI may not work diff --git a/server/src/com/cloud/ha/dao/HighAvailabilityDaoImpl.java b/server/src/com/cloud/ha/dao/HighAvailabilityDaoImpl.java index 724f4f6d7c9..4a886957e11 100644 --- a/server/src/com/cloud/ha/dao/HighAvailabilityDaoImpl.java +++ b/server/src/com/cloud/ha/dao/HighAvailabilityDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.ha.HaWorkVO; @@ -38,7 +37,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {HighAvailabilityDao.class}) public class HighAvailabilityDaoImpl extends GenericDaoBase implements HighAvailabilityDao { - private static final Logger s_logger = Logger.getLogger(HighAvailabilityDaoImpl.class); private final SearchBuilder TBASearch; private final SearchBuilder PreviousInstanceSearch; diff --git 
a/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java b/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java index d407b844a7c..8f562c832c4 100644 --- a/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java +++ b/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ResourceDetail; @@ -61,7 +60,6 @@ import com.cloud.utils.net.NetUtils; @Component @Local(value = StartupCommandProcessor.class) public class CloudZonesStartupProcessor extends AdapterBase implements StartupCommandProcessor { - private static final Logger s_logger = Logger.getLogger(CloudZonesStartupProcessor.class); @Inject ClusterDao _clusterDao = null; @Inject @@ -115,8 +113,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo server = _hostDao.findByGuid(startup.getGuidWithoutResource()); } if (server != null && server.getRemoved() == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found the host " + server.getId() + " by guid: " + if (logger.isDebugEnabled()) { + logger.debug("Found the host " + server.getId() + " by guid: " + startup.getGuid()); } found = true; @@ -140,7 +138,7 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo "Agent cannot connect because the current state is " + server.getStatus().toString()); } - s_logger.info("Old " + server.getType().toString() + logger.info("Old " + server.getType().toString() + " host reconnected w/ id =" + server.getId()); } */ @@ -152,7 +150,7 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo String zoneToken = startup.getDataCenter(); if (zoneToken == null) { - s_logger.warn("No Zone Token passed in, cannot not find zone for the agent"); + logger.warn("No Zone Token passed in, 
cannot not find zone for the agent"); throw new AgentAuthnException("No Zone Token passed in, cannot not find zone for agent"); } @@ -171,8 +169,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo } } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully loaded the DataCenter from the zone token passed in "); + if (logger.isDebugEnabled()) { + logger.debug("Successfully loaded the DataCenter from the zone token passed in "); } long zoneId = zone.getId(); @@ -180,8 +178,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo if (maxHostsInZone != null) { long maxHosts = Long.parseLong(maxHostsInZone.getValue()); long currentCountOfHosts = _hostDao.countRoutingHostsByDataCenter(zoneId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Number of hosts in Zone:" + currentCountOfHosts + ", max hosts limit: " + maxHosts); + if (logger.isDebugEnabled()) { + logger.debug("Number of hosts in Zone:" + currentCountOfHosts + ", max hosts limit: " + maxHosts); } if (currentCountOfHosts >= maxHosts) { throw new AgentAuthnException("Number of running Routing hosts in the Zone:" + zone.getName() + " is already at the max limit:" + maxHosts + @@ -192,24 +190,24 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo HostPodVO pod = null; if (startup.getPrivateIpAddress() == null) { - s_logger.warn("No private IP address passed in for the agent, cannot not find pod for agent"); + logger.warn("No private IP address passed in for the agent, cannot not find pod for agent"); throw new AgentAuthnException("No private IP address passed in for the agent, cannot not find pod for agent"); } if (startup.getPrivateNetmask() == null) { - s_logger.warn("No netmask passed in for the agent, cannot not find pod for agent"); + logger.warn("No netmask passed in for the agent, cannot not find pod for agent"); throw new AgentAuthnException("No netmask passed in for the agent, cannot not find 
pod for agent"); } if (host.getPodId() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Pod is already created for this agent, looks like agent is reconnecting..."); + if (logger.isDebugEnabled()) { + logger.debug("Pod is already created for this agent, looks like agent is reconnecting..."); } pod = _podDao.findById(host.getPodId()); if (!checkCIDR(type, pod, startup.getPrivateIpAddress(), startup.getPrivateNetmask())) { pod = null; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Subnet of Pod does not match the subnet of the agent, not using this Pod: " + host.getPodId()); + if (logger.isDebugEnabled()) { + logger.debug("Subnet of Pod does not match the subnet of the agent, not using this Pod: " + host.getPodId()); } } else { updatePodNetmaskIfNeeded(pod, startup.getPrivateNetmask()); @@ -217,8 +215,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo } if (pod == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to detect the Pod to use from the agent's ip address and netmask passed in "); + if (logger.isDebugEnabled()) { + logger.debug("Trying to detect the Pod to use from the agent's ip address and netmask passed in "); } //deduce pod @@ -236,12 +234,12 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo } if (!podFound) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating a new Pod since no default Pod found that matches the agent's ip address and netmask passed in "); + if (logger.isDebugEnabled()) { + logger.debug("Creating a new Pod since no default Pod found that matches the agent's ip address and netmask passed in "); } if (startup.getGatewayIpAddress() == null) { - s_logger.warn("No Gateway IP address passed in for the agent, cannot create a new pod for the agent"); + logger.warn("No Gateway IP address passed in for the agent, cannot create a new pod for the agent"); throw new AgentAuthnException("No Gateway IP address passed in for the 
agent, cannot create a new pod for the agent"); } //auto-create a new pod, since pod matching the agent's ip is not found @@ -266,8 +264,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo ClusterVO cluster = null; if (host.getClusterId() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster is already created for this agent, looks like agent is reconnecting..."); + if (logger.isDebugEnabled()) { + logger.debug("Cluster is already created for this agent, looks like agent is reconnecting..."); } cluster = _clusterDao.findById(host.getClusterId()); } @@ -278,8 +276,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo if (existingCluster != null) { cluster = existingCluster; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating a new Cluster for this agent with name: " + clusterName + " in Pod: " + pod.getId() + ", in Zone:" + zoneId); + if (logger.isDebugEnabled()) { + logger.debug("Creating a new Cluster for this agent with name: " + clusterName + " in Pod: " + pod.getId() + ", in Zone:" + zoneId); } cluster = new ClusterVO(zoneId, pod.getId(), clusterName); @@ -293,8 +291,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected Zone: " + zoneId + ", Pod: " + pod.getId() + ", Cluster:" + cluster.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Detected Zone: " + zoneId + ", Pod: " + pod.getId() + ", Cluster:" + cluster.getId()); } host.setDataCenterId(zone.getId()); host.setPodId(pod.getId()); @@ -370,8 +368,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo server = _hostDao.findByGuid(startup.getGuidWithoutResource()); } if (server != null && server.getRemoved() == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found the host " + server.getId() + " by guid: " + if (logger.isDebugEnabled()) { + logger.debug("Found 
the host " + server.getId() + " by guid: " + startup.getGuid()); } found = true; @@ -395,7 +393,7 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo "Agent cannot connect because the current state is " + server.getStatus().toString()); } - s_logger.info("Old " + server.getType().toString() + logger.info("Old " + server.getType().toString() + " host reconnected w/ id =" + server.getId()); } */ @@ -407,7 +405,7 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo String zoneToken = startup.getDataCenter(); if (zoneToken == null) { - s_logger.warn("No Zone Token passed in, cannot not find zone for the agent"); + logger.warn("No Zone Token passed in, cannot not find zone for the agent"); throw new AgentAuthnException("No Zone Token passed in, cannot not find zone for agent"); } @@ -426,14 +424,14 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo } } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully loaded the DataCenter from the zone token passed in "); + if (logger.isDebugEnabled()) { + logger.debug("Successfully loaded the DataCenter from the zone token passed in "); } HostPodVO pod = findPod(startup, zone.getId(), Host.Type.Routing); //yes, routing Long podId = null; if (pod != null) { - s_logger.debug("Found pod " + pod.getName() + " for the secondary storage host " + startup.getName()); + logger.debug("Found pod " + pod.getName() + " for the secondary storage host " + startup.getName()); podId = pod.getId(); } host.setDataCenterId(zone.getId()); diff --git a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java index fb14dc4408a..880c4f09535 100644 --- a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java @@ -22,7 +22,6 @@ import java.util.UUID; import javax.inject.Inject; -import org.apache.log4j.Logger; import 
com.cloud.agent.api.Command; import com.cloud.agent.api.to.DiskTO; @@ -52,7 +51,6 @@ import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; public abstract class HypervisorGuruBase extends AdapterBase implements HypervisorGuru { - public static final Logger s_logger = Logger.getLogger(HypervisorGuruBase.class); @Inject VMTemplateDetailsDao _templateDetailsDao; @@ -115,7 +113,7 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis } to.setNicSecIps(secIps); } else { - s_logger.warn("Unabled to load NicVO for NicProfile " + profile.getId()); + logger.warn("Unabled to load NicVO for NicProfile " + profile.getId()); //Workaround for dynamically created nics //FixMe: uuid and secondary IPs can be made part of nic profile to.setUuid(UUID.randomUUID().toString()); diff --git a/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java b/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java index ade1b9677ac..909a4f026e5 100644 --- a/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java +++ b/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java @@ -24,7 +24,6 @@ import javax.annotation.PostConstruct; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Command; @@ -36,7 +35,6 @@ import com.cloud.utils.component.ManagerBase; @Component @Local(value = {HypervisorGuruManager.class}) public class HypervisorGuruManagerImpl extends ManagerBase implements HypervisorGuruManager { - public static final Logger s_logger = Logger.getLogger(HypervisorGuruManagerImpl.class.getName()); @Inject HostDao _hostDao; diff --git a/server/src/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java b/server/src/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java index cf552044396..e55c36d5755 100644 --- a/server/src/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java +++ 
b/server/src/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java @@ -18,14 +18,12 @@ package com.cloud.hypervisor.kvm.discoverer; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.Discoverer; @Local(value = Discoverer.class) public class KvmServerDiscoverer extends LibvirtServerDiscoverer { - private static final Logger s_logger = Logger.getLogger(KvmServerDiscoverer.class); @Override public Hypervisor.HypervisorType getHypervisorType() { diff --git a/server/src/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index 48be8f28516..26162ce4332 100644 --- a/server/src/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -43,7 +43,6 @@ import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; import com.cloud.utils.ssh.SSHCmdHelper; -import org.apache.log4j.Logger; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -55,7 +54,6 @@ import java.util.Map; import java.util.UUID; public abstract class LibvirtServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(LibvirtServerDiscoverer.class); private String _hostIp; private final int _waitTime = 5; /* wait for 5 minutes */ private String _kvmPrivateNic; @@ -119,8 +117,8 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || cluster.getHypervisorType() != getHypervisorType()) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for " + getHypervisorType() + " hypervisors"); + if (logger.isInfoEnabled()) + 
logger.info("invalid cluster id or cluster is not for " + getHypervisorType() + " hypervisors"); return null; } @@ -128,7 +126,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements Map details = new HashMap(); if (!uri.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of the discovery for this: " + uri; - s_logger.debug(msg); + logger.debug(msg); return null; } com.trilead.ssh2.Connection sshConnection = null; @@ -144,7 +142,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements if (existingHosts != null) { for (HostVO existingHost : existingHosts) { if (existingHost.getGuid().toLowerCase().startsWith(guid.toLowerCase())) { - s_logger.debug("Skipping " + agentIp + " because " + guid + " is already in the database for resource " + existingHost.getGuid()); + logger.debug("Skipping " + agentIp + " because " + guid + " is already in the database for resource " + existingHost.getGuid()); return null; } } @@ -154,12 +152,12 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(username, password)) { - s_logger.debug("Failed to authenticate"); + logger.debug("Failed to authenticate"); throw new DiscoveredWithErrorException("Authentication error"); } if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "lsmod|grep kvm", 3)) { - s_logger.debug("It's not a KVM enabled machine"); + logger.debug("It's not a KVM enabled machine"); return null; } @@ -211,7 +209,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements } if (!SSHCmdHelper.sshExecuteCmd(sshConnection, setupAgentCommand + parameters, 3)) { - s_logger.info("cloudstack agent setup command failed: " + logger.info("cloudstack agent setup command failed: " + setupAgentCommand + parameters); return null; } @@ -252,7 +250,7 @@ public abstract class LibvirtServerDiscoverer extends 
DiscovererBase implements throw e; } catch (Exception e) { String msg = " can't setup agent, due to " + e.toString() + " - " + e.getMessage(); - s_logger.warn(msg); + logger.warn(msg); } finally { if (sshConnection != null) sshConnection.close(); @@ -272,10 +270,10 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements try { Thread.sleep(30000); } catch (InterruptedException e) { - s_logger.debug("Failed to sleep: " + e.toString()); + logger.debug("Failed to sleep: " + e.toString()); } } - s_logger.debug("Timeout, to wait for the host connecting to mgt svr, assuming it is failed"); + logger.debug("Timeout, to wait for the host connecting to mgt svr, assuming it is failed"); List hosts = _resourceMgr.findHostByGuid(dcId, guid); if (hosts.size() == 1) { return hosts.get(0); @@ -344,7 +342,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements /* KVM requires host are the same in cluster */ ClusterVO clusterVO = _clusterDao.findById(host.getClusterId()); if (clusterVO == null) { - s_logger.debug("cannot find cluster: " + host.getClusterId()); + logger.debug("cannot find cluster: " + host.getClusterId()); throw new IllegalArgumentException("cannot add host, due to can't find cluster: " + host.getClusterId()); } @@ -382,9 +380,9 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements ShutdownCommand cmd = new ShutdownCommand(ShutdownCommand.DeleteHost, null); _agentMgr.send(host.getId(), cmd); } catch (AgentUnavailableException e) { - s_logger.warn("Sending ShutdownCommand failed: ", e); + logger.warn("Sending ShutdownCommand failed: ", e); } catch (OperationTimedoutException e) { - s_logger.warn("Sending ShutdownCommand failed: ", e); + logger.warn("Sending ShutdownCommand failed: ", e); } return new DeleteHostAnswer(true); diff --git a/server/src/com/cloud/hypervisor/kvm/discoverer/LxcServerDiscoverer.java b/server/src/com/cloud/hypervisor/kvm/discoverer/LxcServerDiscoverer.java index 
dff89c8932a..31d43148af4 100644 --- a/server/src/com/cloud/hypervisor/kvm/discoverer/LxcServerDiscoverer.java +++ b/server/src/com/cloud/hypervisor/kvm/discoverer/LxcServerDiscoverer.java @@ -18,14 +18,12 @@ package com.cloud.hypervisor.kvm.discoverer; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.Discoverer; @Local(value = Discoverer.class) public class LxcServerDiscoverer extends LibvirtServerDiscoverer { - private static final Logger s_logger = Logger.getLogger(LxcServerDiscoverer.class); @Override public Hypervisor.HypervisorType getHypervisorType() { diff --git a/server/src/com/cloud/metadata/ResourceMetaDataManagerImpl.java b/server/src/com/cloud/metadata/ResourceMetaDataManagerImpl.java index 812630ab8b0..e368cd085f4 100644 --- a/server/src/com/cloud/metadata/ResourceMetaDataManagerImpl.java +++ b/server/src/com/cloud/metadata/ResourceMetaDataManagerImpl.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.resourcedetail.dao.LBStickinessPolicyDetailsDao; import org.apache.cloudstack.resourcedetail.dao.LBHealthCheckPolicyDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.dao.DataCenterDetailsDao; @@ -70,7 +69,6 @@ import com.cloud.vm.dao.UserVmDetailsDao; @Component @Local(value = {ResourceMetaDataService.class, ResourceMetaDataManager.class}) public class ResourceMetaDataManagerImpl extends ManagerBase implements ResourceMetaDataService, ResourceMetaDataManager { - public static final Logger s_logger = Logger.getLogger(ResourceMetaDataManagerImpl.class); @Inject VolumeDetailsDao _volumeDetailDao; @Inject diff --git a/server/src/com/cloud/network/ExternalDeviceUsageManagerImpl.java b/server/src/com/cloud/network/ExternalDeviceUsageManagerImpl.java index 9e53341db06..54e74dad3cf 100644 --- 
a/server/src/com/cloud/network/ExternalDeviceUsageManagerImpl.java +++ b/server/src/com/cloud/network/ExternalDeviceUsageManagerImpl.java @@ -31,7 +31,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -153,7 +152,6 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter ScheduledExecutorService _executor; private int _externalNetworkStatsInterval; - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalDeviceUsageManagerImpl.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -210,24 +208,24 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter LoadBalancerVO lb = _loadBalancerDao.findById(loadBalancerRuleId); if (lb == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot update usage stats, LB rule is not found"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot update usage stats, LB rule is not found"); } return; } long networkId = lb.getNetworkId(); Network network = _networkDao.findById(networkId); if (network == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot update usage stats, Network is not found"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot update usage stats, Network is not found"); } return; } ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); if (lbDeviceVO == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot update usage stats, No external LB device found"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot update usage stats, No external LB device found"); } return; } @@ -241,7 +239,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase 
implements Exter if (lbAnswer == null || !lbAnswer.getResult()) { String details = (lbAnswer != null) ? lbAnswer.getDetails() : "details unavailable"; String msg = "Unable to get external load balancer stats for network" + networkId + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); return; } } @@ -249,7 +247,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter long accountId = lb.getAccountId(); AccountVO account = _accountDao.findById(accountId); if (account == null) { - s_logger.debug("Skipping stats update for external LB for account with ID " + accountId); + logger.debug("Skipping stats update for external LB for account with ID " + accountId); return; } @@ -283,7 +281,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter } if (bytesSentAndReceived == null) { - s_logger.debug("Didn't get an external network usage answer for public IP " + publicIp); + logger.debug("Didn't get an external network usage answer for public IP " + publicIp); } else { newCurrentBytesSent += bytesSentAndReceived[0]; newCurrentBytesReceived += bytesSentAndReceived[1]; @@ -312,23 +310,23 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter userStats.setCurrentBytesSent(newCurrentBytesSent); if (oldCurrentBytesSent > newCurrentBytesSent) { - s_logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + "."); + logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + "."); userStats.setNetBytesSent(oldNetBytesSent + oldCurrentBytesSent); } userStats.setCurrentBytesReceived(newCurrentBytesReceived); if (oldCurrentBytesReceived > newCurrentBytesReceived) { - s_logger.warn(warning + "Stored bytes received: " + oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived + "."); + logger.warn(warning + "Stored bytes received: " + 
oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived + "."); userStats.setNetBytesReceived(oldNetBytesReceived + oldCurrentBytesReceived); } if (_userStatsDao.update(userStats.getId(), userStats)) { - s_logger.debug("Successfully updated stats for " + statsEntryIdentifier); + logger.debug("Successfully updated stats for " + statsEntryIdentifier); } else { - s_logger.debug("Failed to update stats for " + statsEntryIdentifier); + logger.debug("Failed to update stats for " + statsEntryIdentifier); } } else { - s_logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); + logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); } } }); @@ -352,14 +350,14 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter } } } catch (Exception e) { - s_logger.warn("Problems while getting external device usage", e); + logger.warn("Problems while getting external device usage", e); } finally { scanLock.releaseRef(); } } private void runExternalDeviceNetworkUsageTask() { - s_logger.debug("External devices stats collector is running..."); + logger.debug("External devices stats collector is running..."); for (DataCenterVO zone : _dcDao.listAll()) { List domainRoutersInZone = _routerDao.listByDataCenter(zone.getId()); @@ -374,8 +372,8 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter long accountId = domainRouter.getAccountId(); if (accountsProcessed.contains(new Long(accountId))) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Networks for Account " + accountId + " are already processed for external network usage, so skipping usage check."); + if (logger.isTraceEnabled()) { + logger.trace("Networks for Account " + accountId + " are already processed for external network usage, so skipping usage check."); } continue; } @@ -389,7 +387,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter for (NetworkVO network : 
networksForAccount) { if (!_networkModel.networkIsConfiguredForExternalNetworking(zoneId, network.getId())) { - s_logger.debug("Network " + network.getId() + " is not configured for external networking, so skipping usage check."); + logger.debug("Network " + network.getId() + " is not configured for external networking, so skipping usage check."); continue; } @@ -413,17 +411,17 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter if (firewallAnswer == null || !firewallAnswer.getResult()) { String details = (firewallAnswer != null) ? firewallAnswer.getDetails() : "details unavailable"; String msg = "Unable to get external firewall stats for network" + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); } else { fwDeviceUsageAnswerMap.put(fwDeviceId, firewallAnswer); } } catch (Exception e) { String msg = "Unable to get external firewall stats for network" + zone.getName(); - s_logger.error(msg, e); + logger.error(msg, e); } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reusing usage Answer for device id " + fwDeviceId + "for Network " + network.getId()); + if (logger.isTraceEnabled()) { + logger.trace("Reusing usage Answer for device id " + fwDeviceId + "for Network " + network.getId()); } firewallAnswer = fwDeviceUsageAnswerMap.get(fwDeviceId); } @@ -444,17 +442,17 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter if (lbAnswer == null || !lbAnswer.getResult()) { String details = (lbAnswer != null) ? 
lbAnswer.getDetails() : "details unavailable"; String msg = "Unable to get external load balancer stats for " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); } else { lbDeviceUsageAnswerMap.put(lbDeviceId, lbAnswer); } } catch (Exception e) { String msg = "Unable to get external load balancer stats for " + zone.getName(); - s_logger.error(msg, e); + logger.error(msg, e); } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reusing usage Answer for device id " + lbDeviceId + "for Network " + network.getId()); + if (logger.isTraceEnabled()) { + logger.trace("Reusing usage Answer for device id " + lbDeviceId + "for Network " + network.getId()); } lbAnswer = lbDeviceUsageAnswerMap.get(lbDeviceId); } @@ -467,7 +465,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter AccountVO account = _accountDao.findById(accountId); if (account == null) { - s_logger.debug("Skipping stats update for account with ID " + accountId); + logger.debug("Skipping stats update for account with ID " + accountId); continue; } @@ -494,13 +492,13 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter userStats.setCurrentBytesSent(newCurrentBytesSent); if (oldCurrentBytesSent > newCurrentBytesSent) { - s_logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + "."); + logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + "."); userStats.setNetBytesSent(oldNetBytesSent + oldCurrentBytesSent); } userStats.setCurrentBytesReceived(newCurrentBytesReceived); if (oldCurrentBytesReceived > newCurrentBytesReceived) { - s_logger.warn(warning + "Stored bytes received: " + oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived + "."); + logger.warn(warning + "Stored bytes received: " + oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived 
+ "."); userStats.setNetBytesReceived(oldNetBytesReceived + oldCurrentBytesReceived); } @@ -553,7 +551,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter } if (bytesSentAndReceived == null) { - s_logger.debug("Didn't get an external network usage answer for public IP " + publicIp); + logger.debug("Didn't get an external network usage answer for public IP " + publicIp); } else { newCurrentBytesSent += bytesSentAndReceived[0]; newCurrentBytesReceived += bytesSentAndReceived[1]; @@ -561,14 +559,14 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter } else { URI broadcastURI = network.getBroadcastUri(); if (broadcastURI == null) { - s_logger.debug("Not updating stats for guest network with ID " + network.getId() + " because the network is not implemented."); + logger.debug("Not updating stats for guest network with ID " + network.getId() + " because the network is not implemented."); return true; } else { long vlanTag = Integer.parseInt(BroadcastDomainType.getValue(broadcastURI)); long[] bytesSentAndReceived = answer.guestVlanBytes.get(String.valueOf(vlanTag)); if (bytesSentAndReceived == null) { - s_logger.warn("Didn't get an external network usage answer for guest VLAN " + vlanTag); + logger.warn("Didn't get an external network usage answer for guest VLAN " + vlanTag); } else { newCurrentBytesSent += bytesSentAndReceived[0]; newCurrentBytesReceived += bytesSentAndReceived[1]; @@ -580,15 +578,15 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter try { userStats = _userStatsDao.lock(accountId, zoneId, networkId, publicIp, hostId, host.getType().toString()); } catch (Exception e) { - s_logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); + logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); return false; } if (updateBytes(userStats, newCurrentBytesSent, newCurrentBytesReceived)) { - s_logger.debug("Successfully updated stats 
for " + statsEntryIdentifier); + logger.debug("Successfully updated stats for " + statsEntryIdentifier); return true; } else { - s_logger.debug("Failed to update stats for " + statsEntryIdentifier); + logger.debug("Failed to update stats for " + statsEntryIdentifier); return false; } } @@ -676,7 +674,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter }); return true; } catch (Exception e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); return false; } } diff --git a/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java index f0d862257aa..1d426e368cb 100644 --- a/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.ExternalFirewallResponse; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -174,7 +173,6 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl @Inject FirewallRulesDao _fwRulesDao; - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalFirewallDeviceManagerImpl.class); private long _defaultFwCapacity; @Override @@ -219,7 +217,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl try { uri = new URI(url); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new InvalidParameterValueException(e.getMessage()); } @@ -302,7 +300,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl _externalFirewallDeviceDao.remove(fwDeviceId); return true; } catch 
(Exception e) { - s_logger.debug("Failed to delete external firewall device due to " + e.getMessage()); + logger.debug("Failed to delete external firewall device due to " + e.getMessage()); return false; } } @@ -388,7 +386,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl _networkExternalFirewallDao.remove(fwDeviceForNetwork.getId()); } } catch (Exception exception) { - s_logger.error("Failed to release firewall device for the network" + network.getId() + " due to " + exception.getMessage()); + logger.error("Failed to release firewall device for the network" + network.getId() + " due to " + exception.getMessage()); return false; } finally { deviceMapLock.unlock(); @@ -423,7 +421,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl @Override public boolean manageGuestNetworkWithExternalFirewall(boolean add, Network network) throws ResourceUnavailableException, InsufficientCapacityException { if (network.getTrafficType() != TrafficType.Guest) { - s_logger.trace("External firewall can only be used for add/remove guest networks."); + logger.trace("External firewall can only be used for add/remove guest networks."); return false; } @@ -453,7 +451,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } else { ExternalFirewallDeviceVO fwDeviceVO = getExternalFirewallForNetwork(network); if (fwDeviceVO == null) { - s_logger.warn("Network shutdown requested on external firewall element, which did not implement the network." + logger.warn("Network shutdown requested on external firewall element, which did not implement the network." 
+ " Either network implement failed half way through or already network shutdown is completed."); return true; } @@ -478,7 +476,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } if (sourceNatIp == null) { String errorMsg = "External firewall was unable to find the source NAT IP address for network " + network.getName(); - s_logger.error(errorMsg); + logger.error(errorMsg); return true; } } @@ -515,10 +513,10 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl String answerDetails = (answer != null) ? answer.getDetails() : "answer was null"; String msg = "External firewall was unable to " + action + " the guest network on the external firewall in zone " + zone.getName() + " due to " + answerDetails; - s_logger.error(msg); + logger.error(msg); if (!add && (!reservedIpAddressesForGuestNetwork.contains(network.getGateway()))) { // If we failed the implementation as well, then just return, no complain - s_logger.error("Skip the shutdown of guest network on SRX because it seems we didn't implement it as well"); + logger.error("Skip the shutdown of guest network on SRX because it seems we didn't implement it as well"); return true; } throw new ResourceUnavailableException(msg, DataCenter.class, zoneId); @@ -545,7 +543,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl List nics = _nicDao.listByNetworkId(network.getId()); for (NicVO nic : nics) { if (nic.getVmType() == null && nic.getReservationStrategy().equals(ReservationStrategy.PlaceHolder) && nic.getIPv4Address().equals(network.getGateway())) { - s_logger.debug("Removing placeholder nic " + nic + " for the network " + network); + logger.debug("Removing placeholder nic " + nic + " for the network " + network); _nicDao.remove(nic.getId()); } } @@ -553,7 +551,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } String action = add ? 
"implemented" : "shut down"; - s_logger.debug("External firewall has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + + logger.debug("External firewall has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + ") with VLAN tag " + guestVlanTag); return true; @@ -574,7 +572,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -617,7 +615,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -645,7 +643,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "External firewall was unable to apply static nat rules to the SRX appliance in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } } @@ -658,7 +656,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "External firewall was unable to apply static nat rules to the SRX appliance in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } } @@ -671,7 +669,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "External firewall was unable to apply port forwarding rules to the SRX appliance in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } } @@ -713,7 +711,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "External firewall was unable to create a remote access VPN in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } @@ -749,7 +747,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; DataCenterVO zone = _dcDao.findById(network.getDataCenterId()); String msg = "External firewall was unable to add remote access users in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } @@ -822,7 +820,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } diff --git a/server/src/com/cloud/network/ExternalIpAddressAllocator.java b/server/src/com/cloud/network/ExternalIpAddressAllocator.java index 58b30f4d57b..7dc4f90a8bf 100644 --- a/server/src/com/cloud/network/ExternalIpAddressAllocator.java +++ b/server/src/com/cloud/network/ExternalIpAddressAllocator.java @@ -30,7 +30,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -41,7 +40,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Local(value = IpAddrAllocator.class) public class ExternalIpAddressAllocator extends AdapterBase implements IpAddrAllocator { - private static final Logger s_logger = Logger.getLogger(ExternalIpAddressAllocator.class); @Inject ConfigurationDao _configDao = null; @Inject @@ -57,7 +55,7 @@ public class ExternalIpAddressAllocator extends AdapterBase implements IpAddrAll return new IpAddr(); } String urlString = _externalIpAllocatorUrl + "?command=getIpAddr&mac=" + macAddr + "&dc=" + dcId + "&pod=" + podId; - s_logger.debug("getIP:" + urlString); 
+ logger.debug("getIP:" + urlString); BufferedReader in = null; try { @@ -68,10 +66,10 @@ public class ExternalIpAddressAllocator extends AdapterBase implements IpAddrAll in = new BufferedReader(new InputStreamReader(conn.getInputStream())); String inputLine; while ((inputLine = in.readLine()) != null) { - s_logger.debug(inputLine); + logger.debug(inputLine); String[] tokens = inputLine.split(","); if (tokens.length != 3) { - s_logger.debug("the return value should be: mac,netmask,gateway"); + logger.debug("the return value should be: mac,netmask,gateway"); return new IpAddr(); } return new IpAddr(tokens[0], tokens[1], tokens[2]); @@ -103,7 +101,7 @@ public class ExternalIpAddressAllocator extends AdapterBase implements IpAddrAll String urlString = _externalIpAllocatorUrl + "?command=releaseIpAddr&ip=" + ip + "&dc=" + dcId + "&pod=" + podId; - s_logger.debug("releaseIP:" + urlString); + logger.debug("releaseIP:" + urlString); BufferedReader in = null; try { URL url = new URL(urlString); diff --git a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java index d7ee2b67737..f94190b35a0 100644 --- a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.response.ExternalLoadBalancerResponse; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -193,7 +192,6 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase IpAddressManager _ipAddrMgr; private long _defaultLbCapacity; - private static final org.apache.log4j.Logger 
s_logger = Logger.getLogger(ExternalLoadBalancerDeviceManagerImpl.class); @Override @DB @@ -240,7 +238,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase try { uri = new URI(url); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new InvalidParameterValueException(e.getMessage()); } @@ -344,7 +342,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase return true; } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); return false; } } @@ -456,7 +454,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if (tryLbProvisioning) { retry = false; // TODO: throwing warning instead of error for now as its possible another provider can service this network - s_logger.warn("There are no load balancer device with the capacity for implementing this network"); + logger.warn("There are no load balancer device with the capacity for implementing this network"); throw exception; } else { tryLbProvisioning = true; // if possible provision a LB appliance in to the physical network @@ -495,11 +493,11 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase try { createLbAnswer = (CreateLoadBalancerApplianceAnswer)_agentMgr.easySend(lbProviderDevice.getHostId(), lbProvisionCmd); if (createLbAnswer == null || !createLbAnswer.getResult()) { - s_logger.error("Could not provision load balancer instance on the load balancer device " + lbProviderDevice.getId()); + logger.error("Could not provision load balancer instance on the load balancer device " + lbProviderDevice.getId()); continue; } } catch (Exception agentException) { - s_logger.error("Could not provision load balancer instance on the load balancer device " + lbProviderDevice.getId() + " due to " + + logger.error("Could not provision load balancer instance on the load balancer device " + lbProviderDevice.getId() + " due to " + agentException.getMessage()); continue; } @@ -524,7 +522,7 
@@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase try { publicIPVlanTag = BroadcastDomainType.getValue(publicIp.getVlanTag()); } catch (URISyntaxException e) { - s_logger.error("Failed to parse public ip vlan tag" + e.getMessage()); + logger.error("Failed to parse public ip vlan tag" + e.getMessage()); } String url = @@ -537,7 +535,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase addExternalLoadBalancer(physicalNetworkId, url, username, password, createLbAnswer.getDeviceName(), createLbAnswer.getServerResource(), false, false, null, null); } catch (Exception e) { - s_logger.error("Failed to add load balancer appliance in to cloudstack due to " + e.getMessage() + + logger.error("Failed to add load balancer appliance in to cloudstack due to " + e.getMessage() + ". So provisioned load balancer appliance will be destroyed."); } @@ -554,14 +552,14 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase try { answer = (DestroyLoadBalancerApplianceAnswer)_agentMgr.easySend(lbProviderDevice.getHostId(), lbDeleteCmd); if (answer == null || !answer.getResult()) { - s_logger.warn("Failed to destroy load balancer appliance created"); + logger.warn("Failed to destroy load balancer appliance created"); } else { // release the public & private IP back to dc pool, as the load balancer appliance is now destroyed _dcDao.releasePrivateIpAddress(lbIP, guestConfig.getDataCenterId(), null); _ipAddrMgr.disassociatePublicIpAddress(publicIp.getId(), _accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount()); } } catch (Exception e) { - s_logger.warn("Failed to destroy load balancer appliance created for the network" + guestConfig.getId() + " due to " + e.getMessage()); + logger.warn("Failed to destroy load balancer appliance created for the network" + guestConfig.getId() + " due to " + e.getMessage()); } } } @@ -696,16 +694,16 @@ public abstract class 
ExternalLoadBalancerDeviceManagerImpl extends AdapterBase try { answer = (DestroyLoadBalancerApplianceAnswer)_agentMgr.easySend(lbDevice.getParentHostId(), lbDeleteCmd); if (answer == null || !answer.getResult()) { - s_logger.warn("Failed to destoy load balancer appliance used by the network" + logger.warn("Failed to destoy load balancer appliance used by the network" + guestConfig.getId() + " due to " + answer == null ? "communication error with agent" : answer.getDetails()); } } catch (Exception e) { - s_logger.warn("Failed to destroy load balancer appliance used by the network" + guestConfig.getId() + " due to " + e.getMessage()); + logger.warn("Failed to destroy load balancer appliance used by the network" + guestConfig.getId() + " due to " + e.getMessage()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully destroyed load balancer appliance used for the network" + guestConfig.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully destroyed load balancer appliance used for the network" + guestConfig.getId()); } deviceMapLock.unlock(); @@ -725,11 +723,11 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase return true; } else { - s_logger.error("Failed to release load balancer device for the network" + guestConfig.getId() + "as failed to acquire lock "); + logger.error("Failed to release load balancer device for the network" + guestConfig.getId() + "as failed to acquire lock "); return false; } } catch (Exception exception) { - s_logger.error("Failed to release load balancer device for the network" + guestConfig.getId() + " due to " + exception.getMessage()); + logger.error("Failed to release load balancer device for the network" + guestConfig.getId() + " due to " + exception.getMessage()); } finally { deviceMapLock.releaseRef(); } @@ -795,7 +793,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase loadBalancingIpAddress = directIp.getAddress().addr(); } catch 
(InsufficientCapacityException capException) { String msg = "Ran out of guest IP addresses from the shared network."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } @@ -803,7 +801,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if (loadBalancingIpAddress == null) { String msg = "Ran out of guest IP addresses."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } @@ -828,7 +826,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase throw ex; } - s_logger.debug("Created static nat rule for inline load balancer"); + logger.debug("Created static nat rule for inline load balancer"); nic.setState(MappingState.Create); } else { loadBalancingIpNic = _nicDao.findById(mapping.getNicId()); @@ -850,11 +848,11 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase // Delete the NIC _nicDao.expunge(loadBalancingIpNic.getId()); - s_logger.debug("Revoked static nat rule for inline load balancer"); + logger.debug("Revoked static nat rule for inline load balancer"); nic.setState(MappingState.Remove); } } else { - s_logger.debug("Revoking a rule for an inline load balancer that has not been programmed yet."); + logger.debug("Revoking a rule for an inline load balancer that has not been programmed yet."); nic.setNic(null); return nic; } @@ -876,7 +874,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); if (lbDeviceVO == null) { - s_logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); + logger.warn("There is no external load balancer device assigned to this network either network is not implement are 
already shutdown so just returning"); return true; } @@ -885,7 +883,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase boolean externalLoadBalancerIsInline = _networkMgr.isNetworkInlineMode(network); if (network.getState() == Network.State.Allocated) { - s_logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + + logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -939,13 +937,13 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "Unable to apply load balancer rules to the external load balancer appliance in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } } catch (Exception ex) { if (externalLoadBalancerIsInline) { - s_logger.error("Rollbacking static nat operation of inline mode load balancing due to error on applying LB rules!"); + logger.error("Rollbacking static nat operation of inline mode load balancing due to error on applying LB rules!"); String existedGuestIp = loadBalancersToApply.get(0).getSrcIp(); // Rollback static NAT operation in current session for (int i = 0; i < loadBalancingRules.size(); i++) { @@ -972,7 +970,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase @Override public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network guestConfig) throws ResourceUnavailableException, InsufficientCapacityException { if (guestConfig.getTrafficType() != TrafficType.Guest) { - s_logger.trace("External load balancer can only be used for guest networks."); + 
logger.trace("External load balancer can only be used for guest networks."); return false; } @@ -989,17 +987,17 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase lbDeviceVO = allocateLoadBalancerForNetwork(guestConfig); if (lbDeviceVO == null) { String msg = "failed to alloacate a external load balancer for the network " + guestConfig.getId(); - s_logger.error(msg); + logger.error(msg); throw new InsufficientNetworkCapacityException(msg, DataCenter.class, guestConfig.getDataCenterId()); } } externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); - s_logger.debug("Allocated external load balancer device:" + lbDeviceVO.getId() + " for the network: " + guestConfig.getId()); + logger.debug("Allocated external load balancer device:" + lbDeviceVO.getId() + " for the network: " + guestConfig.getId()); } else { // find the load balancer device allocated for the network ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(guestConfig); if (lbDeviceVO == null) { - s_logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + " Either network implement failed half way through or already network shutdown is completed. 
So just returning."); return true; } @@ -1025,14 +1023,14 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase selfIp = _ipAddrMgr.acquireGuestIpAddress(guestConfig, null); if (selfIp == null) { String msg = "failed to acquire guest IP address so not implementing the network on the external load balancer "; - s_logger.error(msg); + logger.error(msg); throw new InsufficientNetworkCapacityException(msg, Network.class, guestConfig.getId()); } } else { // get the self-ip used by the load balancer Nic selfipNic = getPlaceholderNic(guestConfig); if (selfipNic == null) { - s_logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + " Either network implement failed half way through or already network shutdown is completed. So just returning."); return true; } @@ -1053,7 +1051,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase String answerDetails = (answer != null) ? answer.getDetails() : null; answerDetails = (answerDetails != null) ? 
" due to " + answerDetails : ""; String msg = "External load balancer was unable to " + action + " the guest network on the external load balancer in zone " + zone.getName() + answerDetails; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, Network.class, guestConfig.getId()); } @@ -1069,14 +1067,14 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase boolean releasedLB = freeLoadBalancerForNetwork(guestConfig); if (!releasedLB) { String msg = "Failed to release the external load balancer used for the network: " + guestConfig.getId(); - s_logger.error(msg); + logger.error(msg); } } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { Account account = _accountDao.findByIdIncludingRemoved(guestConfig.getAccountId()); String action = add ? "implemented" : "shut down"; - s_logger.debug("External load balancer has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + + logger.debug("External load balancer has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + ") with VLAN tag " + guestVlanTag); } @@ -1129,20 +1127,20 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase List providers = _networkMgr.getProvidersForServiceInNetwork(network, Service.Firewall); //Only support one provider now if (providers == null) { - s_logger.error("Cannot find firewall provider for network " + network.getId()); + logger.error("Cannot find firewall provider for network " + network.getId()); return null; } if (providers.size() != 1) { - s_logger.error("Found " + providers.size() + " firewall provider for network " + network.getId()); + logger.error("Found " + providers.size() + " firewall provider for network " + network.getId()); return null; } NetworkElement element = _networkModel.getElementImplementingProvider(providers.get(0).getName()); if (!(element 
instanceof IpDeployer)) { - s_logger.error("The firewall provider for network " + network.getName() + " don't have ability to deploy IP address!"); + logger.error("The firewall provider for network " + network.getName() + " don't have ability to deploy IP address!"); return null; } - s_logger.info("Let " + element.getName() + " handle ip association for " + getName() + " in network " + network.getId()); + logger.info("Let " + element.getName() + " handle ip association for " + getName() + " in network " + network.getId()); return (IpDeployer)element; } @@ -1159,7 +1157,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); if (lbDeviceVO == null) { - s_logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); + logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); return null; } @@ -1168,7 +1166,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase boolean externalLoadBalancerIsInline = _networkMgr.isNetworkInlineMode(network); if (network.getState() == Network.State.Allocated) { - s_logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + + logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return null; } @@ -1223,7 +1221,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase return answer == null ? 
null : answer.getLoadBalancers(); } } catch (Exception ex) { - s_logger.error("Exception Occured ", ex); + logger.error("Exception Occured ", ex); } //null return is handled by clients return null; diff --git a/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java index bf9327f6993..99e3e27f82c 100644 --- a/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java @@ -26,7 +26,6 @@ import java.util.concurrent.ScheduledExecutorService; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiConstants; @@ -123,7 +122,6 @@ public class ExternalNetworkDeviceManagerImpl extends ManagerBase implements Ext // obsolete // private final static IdentityService _identityService = (IdentityService)ComponentLocator.getLocator(ManagementServer.Name).getManager(IdentityService.class); - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalNetworkDeviceManagerImpl.class); @Override public Host addNetworkDevice(AddNetworkDeviceCmd cmd) { @@ -149,7 +147,7 @@ public class ExternalNetworkDeviceManagerImpl extends ManagerBase implements Ext // if (devs.size() == 1) { // res.add(devs.get(0)); // } else { -// s_logger.debug("List " + type + ": " + devs.size() + " found"); +// logger.debug("List " + type + ": " + devs.size() + " found"); // } // } else { // List devs = _hostDao.listBy(type, zoneId); diff --git a/server/src/com/cloud/network/IpAddressManagerImpl.java b/server/src/com/cloud/network/IpAddressManagerImpl.java index 28df9712553..5113b7fcbbb 100644 --- a/server/src/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/com/cloud/network/IpAddressManagerImpl.java @@ -40,7 +40,6 @@ import org.apache.cloudstack.region.PortableIp; import 
org.apache.cloudstack.region.PortableIpDao; import org.apache.cloudstack.region.PortableIpVO; import org.apache.cloudstack.region.Region; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -168,7 +167,6 @@ import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; public class IpAddressManagerImpl extends ManagerBase implements IpAddressManager, Configurable { - private static final Logger s_logger = Logger.getLogger(IpAddressManagerImpl.class); @Inject NetworkOrchestrationService _networkMgr = null; @@ -399,7 +397,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage Network.State.getStateMachine().registerListener(new NetworkStateListener(_usageEventDao, _networksDao, _configDao)); - s_logger.info("Network Manager is configured."); + logger.info("Network Manager is configured."); return true; } @@ -451,14 +449,14 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (postApplyRules) { if (revokeCount != null && revokeCount.longValue() == totalCount.longValue()) { - s_logger.trace("All rules are in Revoke state, have to dis-assiciate IP from the backend"); + logger.trace("All rules are in Revoke state, have to dis-assiciate IP from the backend"); return true; } } else { if (activeCount != null && activeCount > 0) { continue; } else if (addCount != null && addCount.longValue() == totalCount.longValue()) { - s_logger.trace("All rules are in Add state, have to assiciate IP with the backend"); + logger.trace("All rules are in Add state, have to assiciate IP with the backend"); return true; } else { continue; @@ -475,7 +473,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage public boolean applyRules(List rules, FirewallRule.Purpose purpose, NetworkRuleApplier applier, boolean continueOnError) throws ResourceUnavailableException { if (rules == null || rules.size() == 0) { - s_logger.debug("There are 
no rules to forward to the network elements"); + logger.debug("There are no rules to forward to the network elements"); return true; } @@ -506,7 +504,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (!continueOnError) { throw e; } - s_logger.warn("Problems with applying " + purpose + " rules but pushing on", e); + logger.warn("Problems with applying " + purpose + " rules but pushing on", e); success = false; } @@ -524,31 +522,31 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // Revoke all firewall rules for the ip try { - s_logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of public IP id=" + ipId + " release..."); if (!_firewallMgr.revokeFirewallRulesForIp(ipId, userId, caller)) { - s_logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of ip release"); success = false; } } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); success = false; } // Revoke all PF/Static nat rules for the ip try { - s_logger.debug("Revoking all " + Purpose.PortForwarding + "/" + Purpose.StaticNat + " rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all " + Purpose.PortForwarding + "/" + Purpose.StaticNat + " rules as a part of public IP id=" + ipId + " release..."); if (!_rulesMgr.revokeAllPFAndStaticNatRulesForIp(ipId, userId, caller)) { - s_logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the port forwarding rules for ip 
id=" + ipId + " as a part of ip release"); success = false; } } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release", e); success = false; } - s_logger.debug("Revoking all " + Purpose.LoadBalancing + " rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all " + Purpose.LoadBalancing + " rules as a part of public IP id=" + ipId + " release..."); if (!_lbMgr.removeAllLoadBalanacersForIp(ipId, caller, userId)) { - s_logger.warn("Unable to revoke all the load balancer rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the load balancer rules for ip id=" + ipId + " as a part of ip release"); success = false; } @@ -556,11 +554,11 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // conditions // only when ip address failed to be cleaned up as a part of account destroy and was marked as Releasing, this part of // the code would be triggered - s_logger.debug("Cleaning up remote access vpns as a part of public IP id=" + ipId + " release..."); + logger.debug("Cleaning up remote access vpns as a part of public IP id=" + ipId + " release..."); try { _vpnMgr.destroyRemoteAccessVpnForIp(ipId, caller); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to destroy remote access vpn for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to destroy remote access vpn for ip id=" + ipId + " as a part of ip release", e); success = false; } @@ -575,7 +573,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // Cleanup all ip address resources - PF/LB/Static nat rules if (!cleanupIpResources(addrId, userId, caller)) { success = false; - s_logger.warn("Failed to release resources for ip address id=" + 
addrId); + logger.warn("Failed to release resources for ip address id=" + addrId); } IPAddressVO ip = markIpAsUnavailable(addrId); @@ -583,15 +581,15 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage return true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing ip id=" + addrId + "; sourceNat = " + ip.isSourceNat()); + if (logger.isDebugEnabled()) { + logger.debug("Releasing ip id=" + addrId + "; sourceNat = " + ip.isSourceNat()); } if (ip.getAssociatedWithNetworkId() != null) { Network network = _networksDao.findById(ip.getAssociatedWithNetworkId()); try { if (!applyIpAssociations(network, true)) { - s_logger.warn("Unable to apply ip address associations for " + network); + logger.warn("Unable to apply ip address associations for " + network); success = false; } } catch (ResourceUnavailableException e) { @@ -607,7 +605,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (ip.isPortable()) { releasePortableIpAddress(addrId); } - s_logger.debug("Released a public ip id=" + addrId); + logger.debug("Released a public ip id=" + addrId); } return success; @@ -703,7 +701,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid()); throw ex; } - s_logger.warn(errorMessage.toString()); + logger.warn(errorMessage.toString()); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, dcId); ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid()); throw ex; @@ -748,7 +746,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid()); throw ex; } - s_logger.warn(errorMessage.toString()); + logger.warn(errorMessage.toString()); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", 
DataCenter.class, dcId); ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid()); throw ex; @@ -761,7 +759,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage try { _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); } catch (ResourceAllocationException ex) { - s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); + logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded."); } } @@ -879,8 +877,8 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage ConcurrentOperationException ex = new ConcurrentOperationException("Unable to lock account"); throw ex; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("lock account " + ownerId + " is acquired"); + if (logger.isDebugEnabled()) { + logger.debug("lock account " + ownerId + " is acquired"); } boolean displayIp = true; if (guestNtwkId != null) { @@ -904,14 +902,14 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage return ip; } finally { if (owner != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing lock account " + ownerId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing lock account " + ownerId); } _accountDao.releaseFromLockTable(ownerId); } if (ip == null) { - s_logger.error("Unable to get source nat ip address for account " + ownerId); + logger.error("Unable to get source nat ip address for account " + ownerId); } } } @@ -934,7 +932,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage _ipAddressDao.unassignIpAddress(addr.getId()); } else { success = false; - s_logger.warn("Failed to release resources for ip address id=" + addr.getId()); + logger.warn("Failed to release resources for ip address id=" + 
addr.getId()); } } } @@ -981,7 +979,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (!continueOnError) { throw e; } else { - s_logger.debug("Resource is not available: " + provider.getName(), e); + logger.debug("Resource is not available: " + provider.getName(), e); } } } @@ -1009,17 +1007,17 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage Account accountToLock = null; try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); } accountToLock = _accountDao.acquireInLockTable(ipOwner.getId()); if (accountToLock == null) { - s_logger.warn("Unable to lock account: " + ipOwner.getId()); + logger.warn("Unable to lock account: " + ipOwner.getId()); throw new ConcurrentOperationException("Unable to acquire account lock"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address lock acquired"); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address lock acquired"); } ip = Transaction.execute(new TransactionCallbackWithException() { @@ -1036,7 +1034,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage CallContext.current().setEventDetails("Ip Id: " + ip.getId()); Ip ipAddress = ip.getAddress(); - s_logger.debug("Got " + ipAddress + " to assign for account " + ipOwner.getId() + " in zone " + zone.getId()); + logger.debug("Got " + ipAddress + " to assign for account " + ipOwner.getId() + " in zone " + zone.getId()); return ip; } @@ -1044,11 +1042,11 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } finally { if (accountToLock != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing lock account " + ipOwner); + if (logger.isDebugEnabled()) { + 
logger.debug("Releasing lock account " + ipOwner); } _accountDao.releaseFromLockTable(ipOwner.getId()); - s_logger.debug("Associate IP address lock released"); + logger.debug("Associate IP address lock released"); } } return ip; @@ -1177,12 +1175,12 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } owner = _accountMgr.getAccount(ipToAssoc.getAllocatedToAccountId()); } else { - s_logger.debug("Unable to find ip address by id: " + ipId); + logger.debug("Unable to find ip address by id: " + ipId); return null; } if (ipToAssoc.getAssociatedWithNetworkId() != null) { - s_logger.debug("IP " + ipToAssoc + " is already assocaited with network id" + networkId); + logger.debug("IP " + ipToAssoc + " is already assocaited with network id" + networkId); return ipToAssoc; } @@ -1190,7 +1188,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (network != null) { _accountMgr.checkAccess(owner, AccessType.UseEntry, false, network); } else { - s_logger.debug("Unable to find ip address by id: " + ipId); + logger.debug("Unable to find ip address by id: " + ipId); return null; } @@ -1237,7 +1235,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } } - s_logger.debug("Associating ip " + ipToAssoc + " to network " + network); + logger.debug("Associating ip " + ipToAssoc + " to network " + network); IPAddressVO ip = _ipAddressDao.findById(ipId); //update ip address with networkId @@ -1249,16 +1247,16 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage try { success = applyIpAssociations(network, false); if (success) { - s_logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); + logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); } else { - s_logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); + 
logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); } return ip; } finally { if (!success && releaseOnFailure) { if (ip != null) { try { - s_logger.warn("Failed to associate ip address, so releasing ip from the database " + ip); + logger.warn("Failed to associate ip address, so releasing ip from the database " + ip); _ipAddressDao.markAsUnavailable(ip.getId()); if (!applyIpAssociations(network, true)) { // if fail to apply ip assciations again, unassign ip address without updating resource @@ -1266,7 +1264,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage _ipAddressDao.unassignIpAddress(ip.getId()); } } catch (Exception e) { - s_logger.warn("Unable to disassociate ip address for recovery", e); + logger.warn("Unable to disassociate ip address for recovery", e); } } } @@ -1328,7 +1326,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } owner = _accountMgr.getAccount(ipToAssoc.getAllocatedToAccountId()); } else { - s_logger.debug("Unable to find ip address by id: " + ipId); + logger.debug("Unable to find ip address by id: " + ipId); return null; } @@ -1363,9 +1361,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage try { boolean success = applyIpAssociations(network, false); if (success) { - s_logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); + logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); } else { - s_logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); + logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); } return ip; } finally { @@ -1526,12 +1524,12 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage + requiredOfferings.get(0).getTags()); } - s_logger.debug("Creating network 
for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of createVlanIpRange process"); guestNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, null, owner, null, physicalNetwork, zoneId, ACLType.Account, null, null, null, null, true, null); if (guestNetwork == null) { - s_logger.warn("Failed to create default Virtual network for the account " + accountId + "in zone " + zoneId); + logger.warn("Failed to create default Virtual network for the account " + accountId + "in zone " + zoneId); throw new CloudRuntimeException("Failed to create a Guest Isolated Networks with SourceNAT " + "service enabled as a part of createVlanIpRange, for the account " + accountId + "in zone " + zoneId); } @@ -1588,19 +1586,19 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage DeployDestination dest = new DeployDestination(zone, null, null, null); Account callerAccount = CallContext.current().getCallingAccount(); UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); - Journal journal = new Journal.LogJournal("Implementing " + guestNetwork, s_logger); + Journal journal = new Journal.LogJournal("Implementing " + guestNetwork, logger); ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, callerAccount); - s_logger.debug("Implementing network " + guestNetwork + " as a part of network provision for persistent network"); + logger.debug("Implementing network " + guestNetwork + " as a part of network provision for persistent network"); try { Pair implementedNetwork = _networkMgr.implementNetwork(guestNetwork.getId(), dest, context); if (implementedNetwork == null || implementedNetwork.first() == 
null) { - s_logger.warn("Failed to implement the network " + guestNetwork); + logger.warn("Failed to implement the network " + guestNetwork); } if (implementedNetwork != null) { guestNetwork = implementedNetwork.second(); } } catch (Exception ex) { - s_logger.warn("Failed to implement network " + guestNetwork + " elements and resources as a part of" + " network provision due to ", ex); + logger.warn("Failed to implement network " + guestNetwork + " elements and resources as a part of" + " network provision due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id)" + " elements and resources as a part of network provision for persistent network"); e.addProxyObject(guestNetwork.getUuid(), "networkId"); @@ -1616,7 +1614,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage final IPAddressVO ip = _ipAddressDao.findById(addrId); if (ip.getAllocatedToAccountId() == null && ip.getAllocatedTime() == null) { - s_logger.trace("Ip address id=" + addrId + " is already released"); + logger.trace("Ip address id=" + addrId + " is already released"); return ip; } @@ -1657,14 +1655,14 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB public String acquireGuestIpAddress(Network network, String requestedIp) { if (requestedIp != null && requestedIp.equals(network.getGateway())) { - s_logger.warn("Requested ip address " + requestedIp + " is used as a gateway address in network " + network); + logger.warn("Requested ip address " + requestedIp + " is used as a gateway address in network " + network); return null; } Set availableIps = _networkModel.getAvailableIps(network, requestedIp); if (availableIps == null || availableIps.isEmpty()) { - s_logger.debug("There are no free ips in the network " + network); + logger.debug("There are no free ips in the network " + network); return null; } @@ -1675,10 +1673,10 @@ public class IpAddressManagerImpl extends 
ManagerBase implements IpAddressManage String[] cidr = network.getCidr().split("/"); boolean isSameCidr = NetUtils.sameSubnetCIDR(requestedIp, NetUtils.long2Ip(array[0]), Integer.parseInt(cidr[1])); if (!isSameCidr) { - s_logger.warn("Requested ip address " + requestedIp + " doesn't belong to the network " + network + " cidr"); + logger.warn("Requested ip address " + requestedIp + " doesn't belong to the network " + network + " cidr"); return null; } else if (NetUtils.IsIpEqualToNetworkOrBroadCastIp(requestedIp, cidr[0], Integer.parseInt(cidr[1]))) { - s_logger.warn("Requested ip address " + requestedIp + " is equal to the to the network/broadcast ip of the network" + network); + logger.warn("Requested ip address " + requestedIp + " is equal to the to the network/broadcast ip of the network" + network); return null; } return requestedIp; @@ -1696,7 +1694,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @Override public boolean applyStaticNats(List staticNats, boolean continueOnError, boolean forRevoke) throws ResourceUnavailableException { if (staticNats == null || staticNats.size() == 0) { - s_logger.debug("There are no static nat rules for the network elements"); + logger.debug("There are no static nat rules for the network elements"); return true; } @@ -1705,7 +1703,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // Check if the StaticNat service is supported if (!_networkModel.areServicesSupportedInNetwork(network.getId(), Service.StaticNat)) { - s_logger.debug("StaticNat service is not supported in specified network id"); + logger.debug("StaticNat service is not supported in specified network id"); return true; } @@ -1733,7 +1731,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (!continueOnError) { throw e; } - s_logger.warn("Problems with " + element.getName() + " but pushing on", e); + logger.warn("Problems with " + element.getName() + " but 
pushing on", e); success = false; } @@ -1794,7 +1792,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if ((off.getElasticLb() && forElasticLb) || (off.getElasticIp() && forElasticIp)) { try { - s_logger.debug("Allocating system IP address for load balancer rule..."); + logger.debug("Allocating system IP address for load balancer rule..."); // allocate ip ip = allocateIP(owner, true, guestNetwork.getDataCenterId()); // apply ip associations @@ -1824,10 +1822,10 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (ip.getSystem()) { CallContext ctx = CallContext.current(); if (!disassociatePublicIpAddress(ip.getId(), ctx.getCallingUserId(), ctx.getCallingAccount())) { - s_logger.warn("Unable to release system ip address id=" + ip.getId()); + logger.warn("Unable to release system ip address id=" + ip.getId()); success = false; } else { - s_logger.warn("Successfully released system ip address id=" + ip.getId()); + logger.warn("Successfully released system ip address id=" + ip.getId()); } } } @@ -1855,7 +1853,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (placeholderNic != null) { IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), placeholderNic.getIPv4Address()); ip = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); - s_logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network); + logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network); } } @@ -1928,7 +1926,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage //Get ip address from the placeholder and don't allocate a new one if (requestedIpv4 != null && vm.getType() == VirtualMachine.Type.DomainRouter) { - s_logger.debug("There won't be nic assignment for VR id " + 
vm.getId() +" in this network " + network); + logger.debug("There won't be nic assignment for VR id " + vm.getId() +" in this network " + network); } @@ -1995,7 +1993,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage public String allocatePublicIpForGuestNic(Network network, Long podId, Account owner, String requestedIp) throws InsufficientAddressCapacityException { PublicIp ip = assignPublicIpAddress(network.getDataCenterId(), podId, owner, VlanType.DirectAttached, network.getId(), requestedIp, false); if (ip == null) { - s_logger.debug("There is no free public ip address"); + logger.debug("There is no free public ip address"); return null; } Ip ipAddr = ip.getAddress(); diff --git a/server/src/com/cloud/network/Ipv6AddressManagerImpl.java b/server/src/com/cloud/network/Ipv6AddressManagerImpl.java index d8a9d686cd4..0655da78f09 100644 --- a/server/src/com/cloud/network/Ipv6AddressManagerImpl.java +++ b/server/src/com/cloud/network/Ipv6AddressManagerImpl.java @@ -24,7 +24,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -46,7 +45,6 @@ import com.cloud.utils.net.NetUtils; @Local(value = {Ipv6AddressManager.class}) public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressManager { - public static final Logger s_logger = Logger.getLogger(Ipv6AddressManagerImpl.class.getName()); String _name = null; int _ipv6RetryMax = 0; @@ -80,7 +78,7 @@ public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa } List vlans = _vlanDao.listVlansByNetworkId(networkId); if (vlans == null) { - s_logger.debug("Cannot find related vlan attached to network " + networkId); + logger.debug("Cannot find related vlan attached to network " + networkId); return null; } String ip = null; diff --git a/server/src/com/cloud/network/NetworkModelImpl.java 
b/server/src/com/cloud/network/NetworkModelImpl.java index 154e666c7a3..39c7f0cce06 100644 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -38,7 +38,6 @@ import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDao; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Config; @@ -127,7 +126,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = {NetworkModel.class}) public class NetworkModelImpl extends ManagerBase implements NetworkModel { - static final Logger s_logger = Logger.getLogger(NetworkModelImpl.class); @Inject EntityManager _entityMgr; @Inject @@ -339,7 +337,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { // no active rules/revoked rules are associated with this public IP, so remove the // association with the provider if (ip.isSourceNat()) { - s_logger.debug("Not releasing ip " + ip.getAddress().addr() + " as it is in use for SourceNat"); + logger.debug("Not releasing ip " + ip.getAddress().addr() + " as it is in use for SourceNat"); } else { ip.setState(State.Releasing); } @@ -594,7 +592,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } } else { if (network.getCidr() == null) { - s_logger.debug("Network - " + network.getId() + " has NULL CIDR."); + logger.debug("Network - " + network.getId() + " has NULL CIDR."); return false; } hasFreeIps = (getAvailableIps(network, null)).size() > 0; @@ -766,7 +764,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } } if (ret_network == null) { - s_logger.debug("Can not find network with security group enabled with free IPs"); + logger.debug("Can not find network with security group enabled with free IPs"); } return ret_network; 
} @@ -779,7 +777,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } if (networks.size() > 1) { - s_logger.debug("There are multiple network with security group enabled? select one of them..."); + logger.debug("There are multiple network with security group enabled? select one of them..."); } return networks.get(0); } @@ -873,12 +871,12 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } } } else { - s_logger.debug("Unable to find default network for the vm; vm doesn't have any nics"); + logger.debug("Unable to find default network for the vm; vm doesn't have any nics"); return null; } if (defaultNic == null) { - s_logger.debug("Unable to find default network for the vm; vm doesn't have default nic"); + logger.debug("Unable to find default network for the vm; vm doesn't have default nic"); } return defaultNic; @@ -890,7 +888,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { String userDataProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData); if (userDataProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); + logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); return null; } @@ -932,7 +930,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { List virtualNetworks = _networksDao.listByZoneAndGuestType(accountId, dataCenterId, Network.GuestType.Isolated, false); if (virtualNetworks.isEmpty()) { - s_logger.trace("Unable to find default Virtual network account id=" + accountId); + logger.trace("Unable to find default Virtual network account id=" + accountId); return null; } @@ -943,7 +941,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { if (networkElementNic != null) { return networkElementNic.getIPv4Address(); } else { - s_logger.warn("Unable to set find network element for the 
network id=" + virtualNetwork.getId()); + logger.warn("Unable to set find network element for the network id=" + virtualNetwork.getId()); return null; } } @@ -1150,7 +1148,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { Long pNtwkId = null; for (PhysicalNetwork pNtwk : pNtwks) { if (pNtwk.getTags().contains(tag)) { - s_logger.debug("Found physical network id=" + pNtwk.getId() + " based on requested tags " + tag); + logger.debug("Found physical network id=" + pNtwk.getId() + " based on requested tags " + tag); pNtwkId = pNtwk.getId(); break; } @@ -1187,7 +1185,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { @Override public boolean isSecurityGroupSupportedInNetwork(Network network) { if (network.getTrafficType() != TrafficType.Guest) { - s_logger.trace("Security group can be enabled for Guest networks only; and network " + network + " has a diff traffic type"); + logger.trace("Security group can be enabled for Guest networks only; and network " + network + " has a diff traffic type"); return false; } @@ -1255,8 +1253,8 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { return label; } } catch (Exception ex) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to retrive the default label for management traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + + if (logger.isDebugEnabled()) { + logger.debug("Failed to retrive the default label for management traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + ex.getMessage()); } } @@ -1290,8 +1288,8 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { return label; } } catch (Exception ex) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to retrive the default label for storage traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + + if (logger.isDebugEnabled()) { + logger.debug("Failed to retrive the 
default label for storage traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + ex.getMessage()); } } @@ -1328,7 +1326,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { public boolean isProviderEnabledInPhysicalNetwork(long physicalNetowrkId, String providerName) { PhysicalNetworkServiceProviderVO ntwkSvcProvider = _pNSPDao.findByServiceProvider(physicalNetowrkId, providerName); if (ntwkSvcProvider == null) { - s_logger.warn("Unable to find provider " + providerName + " in physical network id=" + physicalNetowrkId); + logger.warn("Unable to find provider " + providerName + " in physical network id=" + physicalNetowrkId); return false; } return isProviderEnabled(ntwkSvcProvider); @@ -1370,7 +1368,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { if (physicalNetworkId == null) { assert (false) : "Can't get the physical network"; - s_logger.warn("Can't get the physical network"); + logger.warn("Can't get the physical network"); return null; } @@ -1657,8 +1655,8 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { return label; } } catch (Exception ex) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to retrieve the default label for public traffic." + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to: " + + if (logger.isDebugEnabled()) { + logger.debug("Failed to retrieve the default label for public traffic." 
+ "zone: " + dcId + " hypervisor: " + hypervisorType + " due to: " + ex.getMessage()); } } @@ -1692,8 +1690,8 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { return label; } } catch (Exception ex) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to retrive the default label for management traffic:" + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to:" + + if (logger.isDebugEnabled()) { + logger.debug("Failed to retrive the default label for management traffic:" + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to:" + ex.getMessage()); } } @@ -1766,13 +1764,13 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { Long networkDomainId = null; Network network = getNetwork(networkId); if (network.getGuestType() != Network.GuestType.Shared) { - s_logger.trace("Network id=" + networkId + " is not shared"); + logger.trace("Network id=" + networkId + " is not shared"); return false; } NetworkDomainVO networkDomainMap = _networkDomainDao.getDomainNetworkMapByNetworkId(networkId); if (networkDomainMap == null) { - s_logger.trace("Network id=" + networkId + " is shared, but not domain specific"); + logger.trace("Network id=" + networkId + " is shared, but not domain specific"); return true; } else { networkDomainId = networkDomainMap.getDomainId(); @@ -1801,7 +1799,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { for (String ip : ips) { if (requestedIp != null && requestedIp.equals(ip)) { - s_logger.warn("Requested ip address " + requestedIp + " is already in use in network" + network); + logger.warn("Requested ip address " + requestedIp + " is already in use in network" + network); return null; } @@ -1859,14 +1857,14 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { boolean isServiceEnabledInNetwork(long physicalNetworkId, long networkId, Service service) { // check if the service is supported in the network if 
(!areServicesSupportedInNetwork(networkId, service)) { - s_logger.debug("Service " + service.getName() + " is not supported in the network id=" + networkId); + logger.debug("Service " + service.getName() + " is not supported in the network id=" + networkId); return false; } // get provider for the service and check if all of them are supported String provider = _ntwkSrvcDao.getProviderForServiceInNetwork(networkId, service); if (!isProviderEnabledInPhysicalNetwork(physicalNetworkId, provider)) { - s_logger.debug("Provider " + provider + " is not enabled in physical network id=" + physicalNetworkId); + logger.debug("Provider " + provider + " is not enabled in physical network id=" + physicalNetworkId); return false; } @@ -1890,7 +1888,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } if (networkList.size() > 1) { - s_logger.info("More than one physical networks exist in zone id=" + zoneId + " with traffic type=" + trafficType + ". "); + logger.info("More than one physical networks exist in zone id=" + zoneId + " with traffic type=" + trafficType + ". 
"); } return networkList.get(0); @@ -2037,7 +2035,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { networkSearch.and("traffictype", networkSearch.entity().getTrafficType(), Op.EQ); NicForTrafficTypeSearch.done(); - s_logger.info("Network Model is configured."); + logger.info("Network Model is configured."); return true; } @@ -2051,11 +2049,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { Provider implementedProvider = element.getProvider(); if (implementedProvider != null) { if (s_providerToNetworkElementMap.containsKey(implementedProvider.getName())) { - s_logger.error("Cannot start NetworkModel: Provider <-> NetworkElement must be a one-to-one map, " + "multiple NetworkElements found for Provider: " + + logger.error("Cannot start NetworkModel: Provider <-> NetworkElement must be a one-to-one map, " + "multiple NetworkElements found for Provider: " + implementedProvider.getName()); continue; } - s_logger.info("Add provider <-> element map entry. " + implementedProvider.getName() + "-" + element.getName() + "-" + element.getClass().getSimpleName()); + logger.info("Add provider <-> element map entry. 
" + implementedProvider.getName() + "-" + element.getName() + "-" + element.getClass().getSimpleName()); s_providerToNetworkElementMap.put(implementedProvider.getName(), element.getName()); } if (capabilities != null && implementedProvider != null) { @@ -2071,7 +2069,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } } } - s_logger.info("Started Network Model"); + logger.info("Started Network Model"); return true; } @@ -2265,7 +2263,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { //if the network has vms in Starting state (nics for those might not be allocated yet as Starting state also used when vm is being Created) //don't GC if (_nicDao.countNicsForStartingVms(networkId) > 0) { - s_logger.debug("Network id=" + networkId + " is not ready for GC as it has vms that are Starting at the moment"); + logger.debug("Network id=" + networkId + " is not ready for GC as it has vms that are Starting at the moment"); return false; } @@ -2318,7 +2316,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { try { md5 = MessageDigest.getInstance("MD5"); } catch (NoSuchAlgorithmException e) { - s_logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception " + e.getMessage(), e); throw new CloudRuntimeException("Unable to get MD5 MessageDigest", e); } md5.reset(); diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index abee17833fb..d5ee63f0c60 100644 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -57,7 +57,6 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.network.element.InternalLoadBalancerElementService; -import org.apache.log4j.Logger; 
import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Config; @@ -199,7 +198,6 @@ import com.cloud.vm.dao.VMInstanceDao; */ @Local(value = {NetworkService.class}) public class NetworkServiceImpl extends ManagerBase implements NetworkService { - private static final Logger s_logger = Logger.getLogger(NetworkServiceImpl.class); private static final long MIN_VLAN_ID = 0L; private static final long MAX_VLAN_ID = 4095L; // 2^12 - 1 @@ -554,8 +552,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (zone.getNetworkType() == NetworkType.Advanced) { if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { _accountMgr.checkAccess(caller, AccessType.UseEntry, false, network); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); } return _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUserId, zone, displayIp); } else { @@ -597,8 +595,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (zone.getNetworkType() == NetworkType.Advanced) { if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { _accountMgr.checkAccess(caller, AccessType.UseEntry, false, network); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); } return _ipAddrMgr.allocatePortableIp(ipOwner, caller, zoneId, networkId, null); } else { @@ -640,7 +638,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { _allowSubdomainNetworkAccess = Boolean.valueOf(_configs.get(Config.SubDomainNetworkAccess.key())); - 
s_logger.info("Network Service is configured."); + logger.info("Network Service is configured."); return true; } @@ -666,7 +664,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (isZoneSgEnabled) { success = _securityGroupService.securityGroupRulesForVmSecIp(secIp.getNicId(), secIp.getIp4Address(), true); - s_logger.info("Associated ip address to NIC : " + secIp.getIp4Address()); + logger.info("Associated ip address to NIC : " + secIp.getIp4Address()); } else { success = true; } @@ -708,12 +706,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { int maxAllowedIpsPerNic = NumbersUtil.parseInt(_configDao.getValue(Config.MaxNumberOfSecondaryIPsPerNIC.key()), 10); Long nicWiseIpCount = _nicSecondaryIpDao.countByNicId(nicId); if(nicWiseIpCount.intValue() >= maxAllowedIpsPerNic) { - s_logger.error("Maximum Number of Ips \"vm.network.nic.max.secondary.ipaddresses = \"" + maxAllowedIpsPerNic + " per Nic has been crossed for the nic " + nicId + "."); + logger.error("Maximum Number of Ips \"vm.network.nic.max.secondary.ipaddresses = \"" + maxAllowedIpsPerNic + " per Nic has been crossed for the nic " + nicId + "."); throw new InsufficientAddressCapacityException("Maximum Number of Ips per Nic has been crossed.", Nic.class, nicId); } - s_logger.debug("Calling the ip allocation ..."); + logger.debug("Calling the ip allocation ..."); String ipaddr = null; //Isolated network can exist in Basic zone only, so no need to verify the zone type if (network.getGuestType() == Network.GuestType.Isolated) { @@ -741,11 +739,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Allocating ip to guest nic " + nicId + " failed"); } } catch (InsufficientAddressCapacityException e) { - s_logger.error("Allocating ip to guest nic " + nicId + " failed"); + logger.error("Allocating ip to guest nic " + nicId + " failed"); return null; } } else { - 
s_logger.error("AddIpToVMNic is not supported in this network..."); + logger.error("AddIpToVMNic is not supported in this network..."); return null; } @@ -759,11 +757,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (!nicSecondaryIpSet) { nicVO.setSecondaryIp(true); // commit when previously set ?? - s_logger.debug("Setting nics table ..."); + logger.debug("Setting nics table ..."); _nicDao.update(nicId, nicVO); } - s_logger.debug("Setting nic_secondary_ip table ..."); + logger.debug("Setting nic_secondary_ip table ..."); Long vmId = nicVO.getInstanceId(); NicSecondaryIpVO secondaryIpVO = new NicSecondaryIpVO(nicId, addrFinal, vmId, ipOwner.getId(), ipOwner.getDomainId(), networkId); _nicSecondaryIpDao.persist(secondaryIpVO); @@ -808,7 +806,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(network.getNetworkOfferingId()); Long nicId = secIpVO.getNicId(); - s_logger.debug("ip id = " + ipAddressId + " nic id = " + nicId); + logger.debug("ip id = " + ipAddressId + " nic id = " + nicId); //check is this the last secondary ip for NIC List ipList = _nicSecondaryIpDao.listByNicId(nicId); boolean lastIp = false; @@ -822,7 +820,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Invalid zone Id is given"); } - s_logger.debug("Calling secondary ip " + secIpVO.getIp4Address() + " release "); + logger.debug("Calling secondary ip " + secIpVO.getIp4Address() + " release "); if (dc.getNetworkType() == NetworkType.Advanced && network.getGuestType() == Network.GuestType.Isolated) { //check PF or static NAT is configured on this ip address String secondaryIp = secIpVO.getIp4Address(); @@ -831,7 +829,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (fwRulesList.size() != 0) { for (FirewallRuleVO rule : fwRulesList) { if 
(_portForwardingDao.findByIdAndIp(rule.getId(), secondaryIp) != null) { - s_logger.debug("VM nic IP " + secondaryIp + " is associated with the port forwarding rule"); + logger.debug("VM nic IP " + secondaryIp + " is associated with the port forwarding rule"); throw new InvalidParameterValueException("Can't remove the secondary ip " + secondaryIp + " is associate with the port forwarding rule"); } } @@ -839,12 +837,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { //check if the secondary ip associated with any static nat rule IPAddressVO publicIpVO = _ipAddressDao.findByVmIp(secondaryIp); if (publicIpVO != null) { - s_logger.debug("VM nic IP " + secondaryIp + " is associated with the static NAT rule public IP address id " + publicIpVO.getId()); + logger.debug("VM nic IP " + secondaryIp + " is associated with the static NAT rule public IP address id " + publicIpVO.getId()); throw new InvalidParameterValueException("Can' remove the ip " + secondaryIp + "is associate with static NAT rule public IP address id " + publicIpVO.getId()); } if (_lbService.isLbRuleMappedToVmGuestIp(secondaryIp)) { - s_logger.debug("VM nic IP " + secondaryIp + " is mapped to load balancing rule"); + logger.debug("VM nic IP " + secondaryIp + " is mapped to load balancing rule"); throw new InvalidParameterValueException("Can't remove the secondary ip " + secondaryIp + " is mapped to load balancing rule"); } @@ -876,11 +874,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { public void doInTransactionWithoutResult(TransactionStatus status) { if (lastIp) { nic.setSecondaryIp(false); - s_logger.debug("Setting nics secondary ip to false ..."); + logger.debug("Setting nics secondary ip to false ..."); _nicDao.update(nicId, nic); } - s_logger.debug("Revoving nic secondary ip entry ..."); + logger.debug("Revoving nic secondary ip entry ..."); _nicSecondaryIpDao.remove(ipVO.getId()); } }); @@ -914,7 +912,7 @@ public class 
NetworkServiceImpl extends ManagerBase implements NetworkService { } if (ipVO.getAllocatedTime() == null) { - s_logger.debug("Ip Address id= " + ipAddressId + " is not allocated, so do nothing."); + logger.debug("Ip Address id= " + ipAddressId + " is not allocated, so do nothing."); return true; } @@ -953,7 +951,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } } } else { - s_logger.warn("Failed to release public ip address id=" + ipAddressId); + logger.warn("Failed to release public ip address id=" + ipAddressId); } return success; } @@ -1173,7 +1171,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { ipv4 = true; } } catch (UnknownHostException e) { - s_logger.error("Unable to convert gateway IP to a InetAddress", e); + logger.error("Unable to convert gateway IP to a InetAddress", e); throw new InvalidParameterValueException("Gateway parameter is invalid"); } } @@ -1307,21 +1305,21 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (ntwkOff.getIsPersistent()) { try { if (network.getState() == Network.State.Setup) { - s_logger.debug("Network id=" + network.getId() + " is already provisioned"); + logger.debug("Network id=" + network.getId() + " is already provisioned"); return network; } DeployDestination dest = new DeployDestination(zone, null, null, null); UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); - Journal journal = new Journal.LogJournal("Implementing " + network, s_logger); + Journal journal = new Journal.LogJournal("Implementing " + network, logger); ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, caller); - s_logger.debug("Implementing network " + network + " as a part of network provision for persistent network"); + logger.debug("Implementing network " + network + " as a part of network provision for persistent network"); Pair implementedNetwork = 
_networkMgr.implementNetwork(network.getId(), dest, context); if (implementedNetwork == null || implementedNetwork.first() == null) { - s_logger.warn("Failed to provision the network " + network); + logger.warn("Failed to provision the network " + network); } network = implementedNetwork.second(); } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to implement persistent guest network " + network + "due to ", ex); + logger.warn("Failed to implement persistent guest network " + network + "due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement persistent guest network"); e.addProxyObject(network.getUuid(), "networkId"); throw e; @@ -1891,9 +1889,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { boolean success = _networkMgr.restartNetwork(networkId, callerAccount, callerUser, cleanup); if (success) { - s_logger.debug("Network id=" + networkId + " is restarted successfully."); + logger.debug("Network id=" + networkId + " is restarted successfully."); } else { - s_logger.warn("Network id=" + networkId + " failed to restart."); + logger.warn("Network id=" + networkId + " failed to restart."); } return success; @@ -2178,11 +2176,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { List nicsPresent = _nicDao.listByNetworkId(networkId); String cidrIpRange[] = NetUtils.getIpRangeFromCidr(guestVmCidrPair[0], size); - s_logger.info("The start IP of the specified guest vm cidr is: " + cidrIpRange[0] + " and end IP is: " + cidrIpRange[1]); + logger.info("The start IP of the specified guest vm cidr is: " + cidrIpRange[0] + " and end IP is: " + cidrIpRange[1]); long startIp = NetUtils.ip2Long(cidrIpRange[0]); long endIp = NetUtils.ip2Long(cidrIpRange[1]); long range = endIp - startIp + 1; - s_logger.info("The specified guest vm cidr has " + range + " IPs"); + logger.info("The specified guest vm cidr has " + range + " IPs"); for (NicVO nic : nicsPresent) { long 
nicIp = NetUtils.ip2Long(nic.getIPv4Address()); @@ -2218,14 +2216,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // Condition for IP Reservation reset : guestVmCidr and network CIDR are same if (network.getNetworkCidr().equals(guestVmCidr)) { - s_logger.warn("Guest VM CIDR and Network CIDR both are same, reservation will reset."); + logger.warn("Guest VM CIDR and Network CIDR both are same, reservation will reset."); network.setNetworkCidr(null); } // Finally update "cidr" with the guestVmCidr // which becomes the effective address space for CloudStack guest VMs network.setCidr(guestVmCidr); _networksDao.update(networkId, network); - s_logger.info("IP Reservation has been applied. The new CIDR for Guests Vms is " + guestVmCidr); + logger.info("IP Reservation has been applied. The new CIDR for Guests Vms is " + guestVmCidr); } ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); @@ -2235,21 +2233,21 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (restartNetwork) { if (validStateToShutdown) { if (!changeCidr) { - s_logger.debug("Shutting down elements and resources for network id=" + networkId + " as a part of network update"); + logger.debug("Shutting down elements and resources for network id=" + networkId + " as a part of network update"); if (!_networkMgr.shutdownNetworkElementsAndResources(context, true, network)) { - s_logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); + logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network elements and resources as a part of update to network of specified id"); ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } } else { // We need to shutdown the network, since we want to re-implement the network. 
- s_logger.debug("Shutting down network id=" + networkId + " as a part of network update"); + logger.debug("Shutting down network id=" + networkId + " as a part of network update"); //check if network has reservation if (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) { - s_logger.warn("Existing IP reservation will become ineffective for the network with id = " + networkId + logger.warn("Existing IP reservation will become ineffective for the network with id = " + networkId + " You need to reapply reservation after network reimplementation."); //set cidr to the newtork cidr network.setCidr(network.getNetworkCidr()); @@ -2258,7 +2256,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } if (!_networkMgr.shutdownNetwork(network.getId(), context, true)) { - s_logger.warn("Failed to shutdown the network as a part of update to network with specified id"); + logger.warn("Failed to shutdown the network as a part of update to network with specified id"); CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network as a part of update of specified network id"); ex.addProxyObject(network.getUuid(), "networkId"); throw ex; @@ -2298,7 +2296,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { long vmId = nic.getInstanceId(); VMInstanceVO vm = _vmDao.findById(vmId); if (vm == null) { - s_logger.error("Vm for nic " + nic.getId() + " not found with Vm Id:" + vmId); + logger.error("Vm for nic " + nic.getId() + " not found with Vm Id:" + vmId); continue; } long isDefault = (nic.isDefaultNic()) ? 
1 : 0; @@ -2323,7 +2321,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (restartNetwork) { if (network.getState() != Network.State.Allocated) { DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); - s_logger.debug("Implementing the network " + network + " elements and resources as a part of network update"); + logger.debug("Implementing the network " + network + " elements and resources as a part of network update"); try { if (!changeCidr) { _networkMgr.implementNetworkElementsAndResources(dest, context, network, _networkOfferingDao.findById(network.getNetworkOfferingId())); @@ -2331,7 +2329,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { _networkMgr.implementNetwork(network.getId(), dest, context); } } catch (Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); + logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id) elements and resources as a part of network update"); e.addProxyObject(network.getUuid(), "networkId"); throw e; @@ -2347,7 +2345,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); _networkMgr.implementNetwork(network.getId(), dest, context); } catch (Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part o" + "f network update due to ", ex); + logger.warn("Failed to implement network " + network + " elements and resources as a part o" + "f network update due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified" + " 
id) elements and resources as a part of network update"); e.addProxyObject(network.getUuid(), "networkId"); throw e; @@ -2365,7 +2363,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { for (String ip : ips) { if (requestedIp != null && requestedIp.equals(ip)) { - s_logger.warn("Requested ip address " + requestedIp + " is already in use in network" + network); + logger.warn("Requested ip address " + requestedIp + " is already in use in network" + network); return null; } @@ -2391,44 +2389,44 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // security group service should be the same if (areServicesSupportedByNetworkOffering(oldNetworkOfferingId, Service.SecurityGroup) != areServicesSupportedByNetworkOffering(newNetworkOfferingId, Service.SecurityGroup)) { - s_logger.debug("Offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different securityGroupProperty, can't upgrade"); + logger.debug("Offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different securityGroupProperty, can't upgrade"); return false; } // Type of the network should be the same if (oldNetworkOffering.getGuestType() != newNetworkOffering.getGuestType()) { - s_logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " are of different types, can't upgrade"); + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " are of different types, can't upgrade"); return false; } // tags should be the same if (newNetworkOffering.getTags() != null) { if (oldNetworkOffering.getTags() == null) { - s_logger.debug("New network offering id=" + newNetworkOfferingId + " has tags and old network offering id=" + oldNetworkOfferingId + " doesn't, can't upgrade"); + logger.debug("New network offering id=" + newNetworkOfferingId + " has tags and old network offering id=" + oldNetworkOfferingId + " doesn't, can't upgrade"); return 
false; } if (!StringUtils.areTagsEqual(oldNetworkOffering.getTags(), newNetworkOffering.getTags())) { - s_logger.debug("Network offerings " + newNetworkOffering.getUuid() + " and " + oldNetworkOffering.getUuid() + " have different tags, can't upgrade"); + logger.debug("Network offerings " + newNetworkOffering.getUuid() + " and " + oldNetworkOffering.getUuid() + " have different tags, can't upgrade"); return false; } } // Traffic types should be the same if (oldNetworkOffering.getTrafficType() != newNetworkOffering.getTrafficType()) { - s_logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different traffic types, can't upgrade"); + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different traffic types, can't upgrade"); return false; } // specify vlan should be the same if (oldNetworkOffering.getSpecifyVlan() != newNetworkOffering.getSpecifyVlan()) { - s_logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyVlan, can't upgrade"); + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyVlan, can't upgrade"); return false; } // specify ipRanges should be the same if (oldNetworkOffering.getSpecifyIpRanges() != newNetworkOffering.getSpecifyIpRanges()) { - s_logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyIpRangess, can't upgrade"); + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyIpRangess, can't upgrade"); return false; } @@ -2566,7 +2564,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } }); } catch (Exception ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new CloudRuntimeException("Fail to create a 
physical network"); } } @@ -2703,13 +2701,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Override public void doInTransactionWithoutResult(TransactionStatus status) { if (addVnetsFinal != null) { - s_logger.debug("Adding vnet range " + addVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + logger.debug("Adding vnet range " + addVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + " as a part of updatePhysicalNetwork call"); //add vnet takes a list of strings to be added. each string is a vnet. _dcDao.addVnet(network.getDataCenterId(), network.getId(), addVnetsFinal); } if (removeVnetsFinal != null) { - s_logger.debug("removing vnet range " + removeVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + logger.debug("removing vnet range " + removeVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + " as a part of updatePhysicalNetwork call"); //deleteVnets takes a list of strings to be removed. each string is a vnet. _datacneterVnet.deleteVnets(TransactionLegacy.currentTxn(), network.getDataCenterId(), network.getId(), removeVnetsFinal); @@ -2736,7 +2734,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // for GRE phynets allow up to 32bits // TODO: Not happy about this test. // What about guru-like objects for physical networs? - s_logger.debug("ISOLATION METHODS:" + network.getIsolationMethods()); + logger.debug("ISOLATION METHODS:" + network.getIsolationMethods()); // Java does not have unsigned types... if (network.getIsolationMethods().contains("GRE")) { minVnet = MIN_GRE_KEY; @@ -2747,12 +2745,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // fail if zone already contains VNI, need to be unique per zone. 
// since adding a range adds each VNI to the database, need only check min/max for (String vnet : VnetRange) { - s_logger.debug("Looking to see if VNI " + vnet + " already exists on another network in zone " + network.getDataCenterId()); + logger.debug("Looking to see if VNI " + vnet + " already exists on another network in zone " + network.getDataCenterId()); List vnis = _datacneterVnet.findVnet(network.getDataCenterId(), vnet); if (vnis != null && !vnis.isEmpty()) { for (DataCenterVnetVO vni : vnis) { if (vni.getPhysicalNetworkId() != network.getId()) { - s_logger.debug("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); + logger.debug("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); throw new InvalidParameterValueException("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); } } @@ -2776,7 +2774,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { StartVnet = Integer.parseInt(VnetRange[0]); EndVnet = Integer.parseInt(VnetRange[1]); } catch (NumberFormatException e) { - s_logger.warn("Unable to parse vnet range:", e); + logger.warn("Unable to parse vnet range:", e); throw new InvalidParameterValueException("Please provide valid vnet range. The vnet range should be a coma seperated list example 2001-2012,3000-3005." 
+ rangeMessage); } @@ -2914,10 +2912,10 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { try { deleteNetworkServiceProvider(provider.getId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); + logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); return false; } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); + logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); return false; } } @@ -3055,7 +3053,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { startVlan = Integer.parseInt(vlanRange[0]); endVlan = Integer.parseInt(vlanRange[1]); } catch (NumberFormatException e) { - s_logger.warn("Unable to parse guest vlan range:", e); + logger.warn("Unable to parse guest vlan range:", e); throw new InvalidParameterValueException("Please provide valid guest vlan range"); } @@ -3169,7 +3167,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { tokens.add(startVlan); tokens.add(endVlan); } catch (NumberFormatException e) { - s_logger.warn("Unable to parse guest vlan range:", e); + logger.warn("Unable to parse guest vlan range:", e); throw new InvalidParameterValueException("Please provide valid guest vlan range"); } return tokens; @@ -3383,7 +3381,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { return nsp; } catch (Exception ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new CloudRuntimeException("Fail to add a provider to physical network"); } @@ -3439,8 +3437,8 @@ 
public class NetworkServiceImpl extends ManagerBase implements NetworkService { boolean update = false; if (state != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("trying to update the state of the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId() + " to state: " + if (logger.isDebugEnabled()) { + logger.debug("trying to update the state of the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId() + " to state: " + stateStr); } switch (state) { @@ -3508,8 +3506,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { Account callerAccount = _accountMgr.getActiveAccountById(callerUser.getAccountId()); // shutdown the provider instances ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Shutting down the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId()); + if (logger.isDebugEnabled()) { + logger.debug("Shutting down the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId()); } NetworkElement element = _networkModel.getElementImplementingProvider(provider.getProviderName()); if (element == null) { @@ -3566,7 +3564,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { Long pNtwkId = null; for (PhysicalNetwork pNtwk : pNtwks) { if (pNtwk.getTags().contains(tag)) { - s_logger.debug("Found physical network id=" + pNtwk.getId() + " based on requested tags " + tag); + logger.debug("Found physical network id=" + pNtwk.getId() + " based on requested tags " + tag); pNtwkId = pNtwk.getId(); break; } @@ -3648,7 +3646,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // find row in networks table that is defined as 'Public', created when zone was deployed NetworkVO publicNetwork = 
_networksDao.listByZoneAndTrafficType(network.getDataCenterId(),TrafficType.Public).get(0); if (publicNetwork != null) { - s_logger.debug("setting public network " + publicNetwork + " to broadcast type vxlan"); + logger.debug("setting public network " + publicNetwork + " to broadcast type vxlan"); publicNetwork.setBroadcastDomainType(BroadcastDomainType.Vxlan); _networksDao.persist(publicNetwork); } @@ -3657,7 +3655,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { return pNetworktrafficType; } catch (Exception ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new CloudRuntimeException("Fail to add a traffic type to physical network"); } @@ -3811,7 +3809,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } OvsProviderVO element = _ovsProviderDao.findByNspId(nsp.getId()); if (element != null) { - s_logger.debug("There is already a Ovs element with service provider id " + nsp.getId()); + logger.debug("There is already a Ovs element with service provider id " + nsp.getId()); return nsp; } element = new OvsProviderVO(nsp.getId()); @@ -4013,12 +4011,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { privateNetwork = _networkMgr.createGuestNetwork(ntwkOffFinal.getId(), networkName, displayText, gateway, cidr, uriString, null, owner, null, pNtwk, pNtwk.getDataCenterId(), ACLType.Account, null, vpcId, null, null, true, null); if (privateNetwork != null) { - s_logger.debug("Successfully created guest network " + privateNetwork); + logger.debug("Successfully created guest network " + privateNetwork); } else { throw new CloudRuntimeException("Creating guest network failed"); } } else { - s_logger.debug("Private network already exists: " + privateNetwork); + logger.debug("Private network already exists: " + privateNetwork); //Do not allow multiple private gateways with same Vlan within a VPC if (vpcId != null && 
vpcId.equals(privateNetwork.getVpcId())) { throw new InvalidParameterValueException("Private network for the vlan: " + uriString + " and cidr " + cidr + " already exists " + "for Vpc " + vpcId @@ -4040,7 +4038,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { _dcDao.update(dc.getId(), dc); } - s_logger.debug("Private network " + privateNetwork + " is created"); + logger.debug("Private network " + privateNetwork + " is created"); return privateNetwork; } diff --git a/server/src/com/cloud/network/NetworkUsageManagerImpl.java b/server/src/com/cloud/network/NetworkUsageManagerImpl.java index fa4eb167d30..ebfeed19e68 100644 --- a/server/src/com/cloud/network/NetworkUsageManagerImpl.java +++ b/server/src/com/cloud/network/NetworkUsageManagerImpl.java @@ -29,7 +29,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.usage.AddTrafficMonitorCmd; @@ -99,7 +98,6 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage TrafficSentinel; } - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(NetworkUsageManagerImpl.class); @Inject HostDao _hostDao; @Inject @@ -152,7 +150,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage try { uri = new URI(cmd.getUrl()); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new InvalidParameterValueException(e.getMessage()); } @@ -278,11 +276,11 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage HostVO host = _hostDao.findById(agentId); if (host != null) { if ((host.getManagementServerId() == null) || (mgmtSrvrId != host.getManagementServerId())) { - s_logger.warn("Not the owner. Not collecting Direct Network usage from TrafficMonitor : " + agentId); + logger.warn("Not the owner. 
Not collecting Direct Network usage from TrafficMonitor : " + agentId); return false; } } else { - s_logger.warn("Agent not found. Not collecting Direct Network usage from TrafficMonitor : " + agentId); + logger.warn("Agent not found. Not collecting Direct Network usage from TrafficMonitor : " + agentId); return false; } @@ -302,12 +300,12 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage } private boolean collectDirectNetworkUsage(final HostVO host) { - s_logger.debug("Direct Network Usage stats collector is running..."); + logger.debug("Direct Network Usage stats collector is running..."); final long zoneId = host.getDataCenterId(); final DetailVO lastCollectDetail = _detailsDao.findDetail(host.getId(), "last_collection"); if (lastCollectDetail == null) { - s_logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: " + host.getId()); + logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: " + host.getId()); return false; } Date lastCollection = new Date(Long.parseLong(lastCollectDetail.getValue())); @@ -323,7 +321,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage final Date now = rightNow.getTime(); if (lastCollection.after(now)) { - s_logger.debug("Current time is less than 2 hours after last collection time : " + lastCollection.toString() + + logger.debug("Current time is less than 2 hours after last collection time : " + lastCollection.toString() + ". Skipping direct network usage collection"); return false; } @@ -382,7 +380,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "Unable to get network usage stats from " + host.getId() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); return false; } else { for (UsageIPAddressVO usageIp : fullDurationIpUsage) { @@ -391,11 +389,11 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage Long bytesSent = bytesSentRcvd[0]; Long bytesRcvd = bytesSentRcvd[1]; if (bytesSent == null || bytesRcvd == null) { - s_logger.debug("Incorrect bytes for IP: " + publicIp); + logger.debug("Incorrect bytes for IP: " + publicIp); continue; } if (bytesSent == 0L && bytesRcvd == 0L) { - s_logger.trace("Ignore zero bytes for IP: " + publicIp); + logger.trace("Ignore zero bytes for IP: " + publicIp); continue; } UserStatisticsVO stats = new UserStatisticsVO(usageIp.getAccountId(), zoneId, null, null, null, null); @@ -415,7 +413,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "Unable to get network usage stats from " + host.getId() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); return false; } else { String publicIp = usageIp.getAddress(); @@ -423,11 +421,11 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage Long bytesSent = bytesSentRcvd[0]; Long bytesRcvd = bytesSentRcvd[1]; if (bytesSent == null || bytesRcvd == null) { - s_logger.debug("Incorrect bytes for IP: " + publicIp); + logger.debug("Incorrect bytes for IP: " + publicIp); continue; } if (bytesSent == 0L && bytesRcvd == 0L) { - s_logger.trace("Ignore zero bytes for IP: " + publicIp); + logger.trace("Ignore zero bytes for IP: " + publicIp); continue; } UserStatisticsVO stats = new UserStatisticsVO(usageIp.getAccountId(), zoneId, null, null, null, null); @@ -439,7 +437,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage } if (collectedStats.size() == 0) { - s_logger.debug("No new direct network stats. No need to persist"); + logger.debug("No new direct network stats. 
No need to persist"); return false; } //Persist all the stats and last_collection time in a single transaction @@ -479,8 +477,8 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage @Override public boolean processDisconnect(long agentId, Status state) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Disconnected called on " + agentId + " with status " + state.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Disconnected called on " + agentId + " with status " + state.toString()); } return true; } @@ -489,12 +487,12 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { if (cmd instanceof StartupTrafficMonitorCommand) { long agentId = agent.getId(); - s_logger.debug("Sending RecurringNetworkUsageCommand to " + agentId); + logger.debug("Sending RecurringNetworkUsageCommand to " + agentId); RecurringNetworkUsageCommand watch = new RecurringNetworkUsageCommand(_interval); try { _agentMgr.send(agentId, new Commands(watch), this); } catch (AgentUnavailableException e) { - s_logger.debug("Can not process connect for host " + agentId, e); + logger.debug("Can not process connect for host " + agentId, e); } } return; diff --git a/server/src/com/cloud/network/StorageNetworkManagerImpl.java b/server/src/com/cloud/network/StorageNetworkManagerImpl.java index 76a51d97d8e..d1553765a7a 100644 --- a/server/src/com/cloud/network/StorageNetworkManagerImpl.java +++ b/server/src/com/cloud/network/StorageNetworkManagerImpl.java @@ -24,7 +24,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.network.CreateStorageNetworkIpRangeCmd; @@ -61,7 +60,6 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; @Component @Local(value = {StorageNetworkManager.class, 
StorageNetworkService.class}) public class StorageNetworkManagerImpl extends ManagerBase implements StorageNetworkManager, StorageNetworkService { - private static final Logger s_logger = Logger.getLogger(StorageNetworkManagerImpl.class); @Inject StorageNetworkIpAddressDao _sNwIpDao; @@ -240,7 +238,7 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet err.append("endIp=" + endIpFinal); err.append("netmask=" + netmask); err.append("zoneId=" + zoneId); - s_logger.debug(err.toString(), e); + logger.debug(err.toString(), e); throw e; } @@ -280,7 +278,7 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet range = _sNwIpRangeDao.acquireInLockTable(rangeId); if (range == null) { String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } /* @@ -332,7 +330,7 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet r = _sNwIpRangeDao.acquireInLockTable(rangeId); if (r == null) { String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } diff --git a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java index 01c1486dfa6..d372f81ca90 100644 --- a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java @@ -30,7 +30,6 @@ import java.util.concurrent.TimeUnit; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; @@ -127,7 +126,6 @@ import com.cloud.vm.UserVmService; @Local(value = {AutoScaleService.class, AutoScaleManager.class}) public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManager, 
AutoScaleService { - private static final Logger s_logger = Logger.getLogger(AutoScaleManagerImpl.class); private ScheduledExecutorService _executor = Executors.newScheduledThreadPool(1); @Inject @@ -382,7 +380,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } profileVO = checkValidityAndPersist(profileVO); - s_logger.info("Successfully create AutoScale Vm Profile with Id: " + profileVO.getId()); + logger.info("Successfully create AutoScale Vm Profile with Id: " + profileVO.getId()); return profileVO; } @@ -433,7 +431,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } vmProfile = checkValidityAndPersist(vmProfile); - s_logger.info("Updated Auto Scale Vm Profile id:" + vmProfile.getId()); + logger.info("Updated Auto Scale Vm Profile id:" + vmProfile.getId()); return vmProfile; } @@ -448,7 +446,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } boolean success = _autoScaleVmProfileDao.remove(id); if (success) { - s_logger.info("Successfully deleted AutoScale Vm Profile with Id: " + id); + logger.info("Successfully deleted AutoScale Vm Profile with Id: " + id); } return success; } @@ -577,7 +575,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale AutoScalePolicyVO policyVO = new AutoScalePolicyVO(cmd.getDomainId(), cmd.getAccountId(), duration, quietTime, null, action); policyVO = checkValidityAndPersist(policyVO, cmd.getConditionIds()); - s_logger.info("Successfully created AutoScale Policy with Id: " + policyVO.getId()); + logger.info("Successfully created AutoScale Policy with Id: " + policyVO.getId()); return policyVO; } @@ -598,15 +596,15 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale boolean success = true; success = _autoScalePolicyDao.remove(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Policy db object"); + logger.warn("Failed to remove AutoScale Policy db object"); return false; } success 
= _autoScalePolicyConditionMapDao.removeByAutoScalePolicyId(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Policy Condition mappings"); + logger.warn("Failed to remove AutoScale Policy Condition mappings"); return false; } - s_logger.info("Successfully deleted autoscale policy id : " + id); + logger.info("Successfully deleted autoscale policy id : " + id); return success; } @@ -738,7 +736,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale for (AutoScaleVmGroupPolicyMapVO vmGroupPolicy : vmGroupPolicyList) { AutoScaleVmGroupVO vmGroupVO = _autoScaleVmGroupDao.findById(vmGroupPolicy.getVmGroupId()); if (vmGroupVO == null) { - s_logger.warn("Stale database entry! There is an entry in VmGroupPolicyMap but the vmGroup is missing:" + vmGroupPolicy.getVmGroupId()); + logger.warn("Stale database entry! There is an entry in VmGroupPolicyMap but the vmGroup is missing:" + vmGroupPolicy.getVmGroupId()); continue; @@ -752,7 +750,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } policy = checkValidityAndPersist(policy, conditionIds); - s_logger.info("Successfully updated Auto Scale Policy id:" + policyId); + logger.info("Successfully updated Auto Scale Policy id:" + policyId); return policy; } @@ -789,7 +787,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } vmGroupVO = checkValidityAndPersist(vmGroupVO, cmd.getScaleUpPolicyIds(), cmd.getScaleDownPolicyIds()); - s_logger.info("Successfully created Autoscale Vm Group with Id: " + vmGroupVO.getId()); + logger.info("Successfully created Autoscale Vm Group with Id: " + vmGroupVO.getId()); return vmGroupVO; } @@ -813,7 +811,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } catch (ResourceUnavailableException re) { throw re; } catch (Exception e) { - s_logger.warn("Exception during configureLbAutoScaleVmGroup in lb rules manager", e); + logger.warn("Exception during configureLbAutoScaleVmGroup 
in lb rules manager", e); return false; } } @@ -844,7 +842,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale _autoScaleVmGroupDao.persist(autoScaleVmGroupVO); } finally { if (!success) { - s_logger.warn("Could not delete AutoScale Vm Group id : " + id); + logger.warn("Could not delete AutoScale Vm Group id : " + id); return false; } } @@ -855,17 +853,17 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale boolean success = _autoScaleVmGroupDao.remove(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Group db object"); + logger.warn("Failed to remove AutoScale Group db object"); return false; } success = _autoScaleVmGroupPolicyMapDao.removeByGroupId(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Group Policy mappings"); + logger.warn("Failed to remove AutoScale Group Policy mappings"); return false; } - s_logger.info("Successfully deleted autoscale vm group id : " + id); + logger.info("Successfully deleted autoscale vm group id : " + id); return success; // Successfull } }); @@ -1039,7 +1037,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale vmGroupVO = checkValidityAndPersist(vmGroupVO, scaleUpPolicyIds, scaleDownPolicyIds); if (vmGroupVO != null) { - s_logger.debug("Updated Auto Scale VmGroup id:" + vmGroupId); + logger.debug("Updated Auto Scale VmGroup id:" + vmGroupId); return vmGroupVO; } else return null; @@ -1064,10 +1062,10 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale _autoScaleVmGroupDao.persist(vmGroup); } finally { if (!success) { - s_logger.warn("Failed to enable AutoScale Vm Group id : " + id); + logger.warn("Failed to enable AutoScale Vm Group id : " + id); return null; } - s_logger.info("Successfully enabled AutoScale Vm Group with Id:" + id); + logger.info("Successfully enabled AutoScale Vm Group with Id:" + id); } return vmGroup; } @@ -1091,10 +1089,10 @@ public class AutoScaleManagerImpl extends 
ManagerBase implements AutoScale _autoScaleVmGroupDao.persist(vmGroup); } finally { if (!success) { - s_logger.warn("Failed to disable AutoScale Vm Group id : " + id); + logger.warn("Failed to disable AutoScale Vm Group id : " + id); return null; } - s_logger.info("Successfully disabled AutoScale Vm Group with Id:" + id); + logger.info("Successfully disabled AutoScale Vm Group with Id:" + id); } return vmGroup; } @@ -1115,7 +1113,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale CounterVO counter = null; - s_logger.debug("Adding Counter " + name); + logger.debug("Adding Counter " + name); counter = _counterDao.persist(new CounterVO(src, name, cmd.getValue())); CallContext.current().setEventDetails(" Id: " + counter.getId() + " Name: " + name); @@ -1146,7 +1144,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale ConditionVO condition = null; condition = _conditionDao.persist(new ConditionVO(cid, threshold, cmd.getEntityOwnerId(), cmd.getDomainId(), op)); - s_logger.info("Successfully created condition with Id: " + condition.getId()); + logger.info("Successfully created condition with Id: " + condition.getId()); CallContext.current().setEventDetails(" Id: " + condition.getId()); return condition; @@ -1215,13 +1213,13 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale ConditionVO condition = _conditionDao.findByCounterId(counterId); if (condition != null) { - s_logger.info("Cannot delete counter " + counter.getName() + " as it is being used in a condition."); + logger.info("Cannot delete counter " + counter.getName() + " as it is being used in a condition."); throw new ResourceInUseException("Counter is in use."); } boolean success = _counterDao.remove(counterId); if (success) { - s_logger.info("Successfully deleted counter with Id: " + counterId); + logger.info("Successfully deleted counter with Id: " + counterId); } return success; @@ -1238,12 +1236,12 @@ public class 
AutoScaleManagerImpl extends ManagerBase implements AutoScale // Verify if condition is used in any autoscale policy if (_autoScalePolicyConditionMapDao.isConditionInUse(conditionId)) { - s_logger.info("Cannot delete condition " + conditionId + " as it is being used in a condition."); + logger.info("Cannot delete condition " + conditionId + " as it is being used in a condition."); throw new ResourceInUseException("Cannot delete Condition when it is in use by one or more AutoScale Policies."); } boolean success = _conditionDao.remove(conditionId); if (success) { - s_logger.info("Successfully deleted condition " + condition.getId()); + logger.info("Successfully deleted condition " + condition.getId()); } return success; } @@ -1254,15 +1252,15 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale int count = 0; count = _autoScaleVmProfileDao.removeByAccountId(accountId); if (count > 0) { - s_logger.debug("Deleted " + count + " AutoScale Vm Profile for account Id: " + accountId); + logger.debug("Deleted " + count + " AutoScale Vm Profile for account Id: " + accountId); } count = _autoScalePolicyDao.removeByAccountId(accountId); if (count > 0) { - s_logger.debug("Deleted " + count + " AutoScale Policies for account Id: " + accountId); + logger.debug("Deleted " + count + " AutoScale Policies for account Id: " + accountId); } count = _conditionDao.removeByAccountId(accountId); if (count > 0) { - s_logger.debug("Deleted " + count + " Conditions for account Id: " + accountId); + logger.debug("Deleted " + count + " Conditions for account Id: " + accountId); } } @@ -1271,7 +1269,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale Integer currentVM = _autoScaleVmGroupVmMapDao.countByGroup(asGroup.getId()); Integer maxVm = asGroup.getMaxMembers(); if (currentVM + numVm > maxVm) { - s_logger.warn("number of VM will greater than the maximum in this group if scaling up, so do nothing more"); + logger.warn("number of VM will 
greater than the maximum in this group if scaling up, so do nothing more"); return false; } return true; @@ -1281,7 +1279,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale Integer currentVM = _autoScaleVmGroupVmMapDao.countByGroup(asGroup.getId()); Integer minVm = asGroup.getMinMembers(); if (currentVM - 1 < minVm) { - s_logger.warn("number of VM will less than the minimum in this group if scaling down, so do nothing more"); + logger.warn("number of VM will less than the minimum in this group if scaling down, so do nothing more"); return false; } return true; @@ -1349,17 +1347,17 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale return -1; } } catch (InsufficientCapacityException ex) { - s_logger.info(ex); - s_logger.trace(ex.getMessage(), ex); + logger.info(ex); + logger.trace(ex.getMessage(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (ResourceAllocationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } } @@ -1376,10 +1374,10 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale CallContext.current().setEventDetails("Vm Id: " + vmId); _userVmManager.startVirtualMachine(vmId, null, null, null); } catch (final ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch 
(ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (InsufficientCapacityException ex) { StringBuilder message = new StringBuilder(ex.getMessage()); @@ -1388,8 +1386,8 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale message.append(", Please check the affinity groups provided, there may not be sufficient capacity to follow them"); } } - s_logger.info(ex); - s_logger.info(message.toString(), ex); + logger.info(ex); + logger.info(message.toString(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); } return true; @@ -1404,7 +1402,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale for (LoadBalancerVMMapVO LbVmMapVo : LbVmMapVos) { long instanceId = LbVmMapVo.getInstanceId(); if (instanceId == vmId) { - s_logger.warn("the new VM is already mapped to LB rule. What's wrong?"); + logger.warn("the new VM is already mapped to LB rule. What's wrong?"); return true; } } @@ -1437,7 +1435,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale public void doScaleUp(long groupId, Integer numVm) { AutoScaleVmGroupVO asGroup = _autoScaleVmGroupDao.findById(groupId); if (asGroup == null) { - s_logger.error("Can not find the groupid " + groupId + " for scaling up"); + logger.error("Can not find the groupid " + groupId + " for scaling up"); return; } if (!checkConditionUp(asGroup, numVm)) { @@ -1446,7 +1444,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale for (int i = 0; i < numVm; i++) { long vmId = createNewVM(asGroup); if (vmId == -1) { - s_logger.error("Can not deploy new VM for scaling up in the group " + logger.error("Can not deploy new VM for scaling up in the group " + asGroup.getId() + ". 
Waiting for next round"); break; } @@ -1469,11 +1467,11 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } } } else { - s_logger.error("Can not assign LB rule for this new VM"); + logger.error("Can not assign LB rule for this new VM"); break; } } else { - s_logger.error("Can not deploy new VM for scaling up in the group " + logger.error("Can not deploy new VM for scaling up in the group " + asGroup.getId() + ". Waiting for next round"); break; } @@ -1484,7 +1482,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale public void doScaleDown(final long groupId) { AutoScaleVmGroupVO asGroup = _autoScaleVmGroupDao.findById(groupId); if (asGroup == null) { - s_logger.error("Can not find the groupid " + groupId + " for scaling up"); + logger.error("Can not find the groupid " + groupId + " for scaling up"); return; } if (!checkConditionDown(asGroup)) { @@ -1527,7 +1525,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale }, destroyVmGracePeriod, TimeUnit.SECONDS); } } else { - s_logger.error("Can not remove LB rule for the VM being destroyed. Do nothing more."); + logger.error("Can not remove LB rule for the VM being destroyed. 
Do nothing more."); } } diff --git a/server/src/com/cloud/network/element/CloudZonesNetworkElement.java b/server/src/com/cloud/network/element/CloudZonesNetworkElement.java index 5dfc127c8a1..0346fd549fc 100644 --- a/server/src/com/cloud/network/element/CloudZonesNetworkElement.java +++ b/server/src/com/cloud/network/element/CloudZonesNetworkElement.java @@ -23,7 +23,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -62,7 +61,6 @@ import com.cloud.vm.dao.UserVmDao; @Local(value = NetworkElement.class) public class CloudZonesNetworkElement extends AdapterBase implements NetworkElement, UserDataServiceProvider { - private static final Logger s_logger = Logger.getLogger(CloudZonesNetworkElement.class); private static final Map> capabilities = setCapabilities(); @@ -227,15 +225,15 @@ public class CloudZonesNetworkElement extends AdapterBase implements NetworkElem try { _agentManager.send(dest.getHost().getId(), cmds); } catch (OperationTimedoutException e) { - s_logger.debug("Unable to send vm data command to host " + dest.getHost()); + logger.debug("Unable to send vm data command to host " + dest.getHost()); return false; } Answer dataAnswer = cmds.getAnswer("vmdata"); if (dataAnswer != null && dataAnswer.getResult()) { - s_logger.info("Sent vm data successfully to vm " + uservm.getInstanceName()); + logger.info("Sent vm data successfully to vm " + uservm.getInstanceName()); return true; } - s_logger.info("Failed to send vm data to vm " + uservm.getInstanceName()); + logger.info("Failed to send vm data to vm " + uservm.getInstanceName()); return false; } return false; diff --git a/server/src/com/cloud/network/element/HAProxyLBRule.java b/server/src/com/cloud/network/element/HAProxyLBRule.java new file mode 100644 index 00000000000..406ef270b7e --- /dev/null +++ b/server/src/com/cloud/network/element/HAProxyLBRule.java @@ -0,0 +1,134 
@@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.element; + +import java.util.List; + +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.lb.LoadBalancingRule; +import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; +import com.cloud.network.rules.LbStickinessMethod.StickinessMethodType; +import com.cloud.utils.Pair; +import com.cloud.utils.net.NetUtils; + +@Component +public class HAProxyLBRule { + + private Logger logger = Logger.getLogger(getClass()); + + public boolean validateHAProxyLBRule(final LoadBalancingRule rule) { + final String timeEndChar = "dhms"; + + if (rule.getSourcePortStart() == NetUtils.HAPROXY_STATS_PORT) { + logger.debug("Can't create LB on port 8081, haproxy is listening for LB stats on this port"); + return false; + } + + for (final LbStickinessPolicy stickinessPolicy : rule.getStickinessPolicies()) { + final List> paramsList = stickinessPolicy.getParams(); + + if (StickinessMethodType.LBCookieBased.getName().equalsIgnoreCase(stickinessPolicy.getMethodName())) { + + } else if 
(StickinessMethodType.SourceBased.getName().equalsIgnoreCase(stickinessPolicy.getMethodName())) { + String tablesize = "200k"; // optional + String expire = "30m"; // optional + + /* overwrite default values with the stick parameters */ + for (final Pair paramKV : paramsList) { + final String key = paramKV.first(); + final String value = paramKV.second(); + if ("tablesize".equalsIgnoreCase(key)) { + tablesize = value; + } + if ("expire".equalsIgnoreCase(key)) { + expire = value; + } + } + if (expire != null && !containsOnlyNumbers(expire, timeEndChar)) { + throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: expire is not in timeformat: " + expire); + } + if (tablesize != null && !containsOnlyNumbers(tablesize, "kmg")) { + throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: tablesize is not in size format: " + tablesize); + + } + } else if (StickinessMethodType.AppCookieBased.getName().equalsIgnoreCase(stickinessPolicy.getMethodName())) { + String length = null; // optional + String holdTime = null; // optional + + for (final Pair paramKV : paramsList) { + final String key = paramKV.first(); + final String value = paramKV.second(); + if ("length".equalsIgnoreCase(key)) { + length = value; + } + if ("holdtime".equalsIgnoreCase(key)) { + holdTime = value; + } + } + + if (length != null && !containsOnlyNumbers(length, null)) { + throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: length is not a number: " + length); + } + if (holdTime != null && !containsOnlyNumbers(holdTime, timeEndChar) && !containsOnlyNumbers(holdTime, null)) { + throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: holdtime is not in timeformat: " + holdTime); + } + } + } + return true; + } + + /** + * This function detects numbers like 12 ,32h ,42m .. etc,. 
1) plain number + * like 12 2) time or tablesize like 12h, 34m, 45k, 54m , here last + * character is non-digit but from known characters . + */ + private static boolean containsOnlyNumbers(final String str, final String endChar) { + if (str == null) { + return false; + } + + String number = str; + if (endChar != null) { + boolean matchedEndChar = false; + if (str.length() < 2) { + return false; // at least one numeric and one char. example: + } + // 3h + final char strEnd = str.toCharArray()[str.length() - 1]; + for (final char c : endChar.toCharArray()) { + if (strEnd == c) { + number = str.substring(0, str.length() - 1); + matchedEndChar = true; + break; + } + } + if (!matchedEndChar) { + return false; + } + } + try { + Integer.parseInt(number); + } catch (final NumberFormatException e) { + return false; + } + return true; + } + +} diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java index 1a340fab407..feb4cc7e234 100644 --- a/server/src/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VirtualRouterElement.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.command.admin.router.ListVirtualRouterElementsC import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.topology.NetworkTopology; import org.apache.cloudstack.network.topology.NetworkTopologyContext; -import org.apache.log4j.Logger; import org.cloud.network.router.deployment.RouterDeploymentDefinition; import org.cloud.network.router.deployment.RouterDeploymentDefinitionBuilder; @@ -73,7 +72,6 @@ import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.OvsProviderDao; import com.cloud.network.dao.VirtualRouterProviderDao; import com.cloud.network.lb.LoadBalancingRule; -import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.lb.LoadBalancingRulesManager; import 
com.cloud.network.router.VirtualRouter; import com.cloud.network.router.VirtualRouter.Role; @@ -90,13 +88,11 @@ import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; -import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.net.NetUtils; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; @@ -114,7 +110,6 @@ import com.google.gson.Gson; public class VirtualRouterElement extends AdapterBase implements VirtualRouterElementService, DhcpServiceProvider, UserDataServiceProvider, SourceNatServiceProvider, StaticNatServiceProvider, FirewallServiceProvider, LoadBalancingServiceProvider, PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer, NetworkMigrationResponder, AggregatedCommandExecutor { - private static final Logger s_logger = Logger.getLogger(VirtualRouterElement.class); public static final AutoScaleCounterType AutoScaleCounterCpu = new AutoScaleCounterType("cpu"); public static final AutoScaleCounterType AutoScaleCounterMemory = new AutoScaleCounterType("memory"); protected static final Map> capabilities = setCapabilities(); @@ -164,6 +159,9 @@ NetworkMigrationResponder, AggregatedCommandExecutor { @Inject protected RouterDeploymentDefinitionBuilder routerDeploymentDefinitionBuilder; + @Inject + private HAProxyLBRule haProxyLBRule; + protected boolean canHandle(final Network network, final Service service) { final Long physicalNetworkId = _networkMdl.getPhysicalNetworkId(network); if (physicalNetworkId == null) { @@ -180,12 +178,12 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (service == null) { if 
(!_networkMdl.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); + logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); return false; } } else { if (!_networkMdl.isProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } } @@ -265,7 +263,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (canHandle(network, Service.Firewall)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -291,104 +289,6 @@ NetworkMigrationResponder, AggregatedCommandExecutor { } } - /* - * This function detects numbers like 12 ,32h ,42m .. etc,. 1) plain number - * like 12 2) time or tablesize like 12h, 34m, 45k, 54m , here last - * character is non-digit but from known characters . - */ - private static boolean containsOnlyNumbers(final String str, final String endChar) { - if (str == null) { - return false; - } - - String number = str; - if (endChar != null) { - boolean matchedEndChar = false; - if (str.length() < 2) { - return false; // at least one numeric and one char. 
example: - } - // 3h - final char strEnd = str.toCharArray()[str.length() - 1]; - for (final char c : endChar.toCharArray()) { - if (strEnd == c) { - number = str.substring(0, str.length() - 1); - matchedEndChar = true; - break; - } - } - if (!matchedEndChar) { - return false; - } - } - try { - Integer.parseInt(number); - } catch (final NumberFormatException e) { - return false; - } - return true; - } - - public static boolean validateHAProxyLBRule(final LoadBalancingRule rule) { - final String timeEndChar = "dhms"; - - if (rule.getSourcePortStart() == NetUtils.HAPROXY_STATS_PORT) { - s_logger.debug("Can't create LB on port 8081, haproxy is listening for LB stats on this port"); - return false; - } - - for (final LbStickinessPolicy stickinessPolicy : rule.getStickinessPolicies()) { - final List> paramsList = stickinessPolicy.getParams(); - - if (StickinessMethodType.LBCookieBased.getName().equalsIgnoreCase(stickinessPolicy.getMethodName())) { - - } else if (StickinessMethodType.SourceBased.getName().equalsIgnoreCase(stickinessPolicy.getMethodName())) { - String tablesize = "200k"; // optional - String expire = "30m"; // optional - - /* overwrite default values with the stick parameters */ - for (final Pair paramKV : paramsList) { - final String key = paramKV.first(); - final String value = paramKV.second(); - if ("tablesize".equalsIgnoreCase(key)) { - tablesize = value; - } - if ("expire".equalsIgnoreCase(key)) { - expire = value; - } - } - if (expire != null && !containsOnlyNumbers(expire, timeEndChar)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: expire is not in timeformat: " + expire); - } - if (tablesize != null && !containsOnlyNumbers(tablesize, "kmg")) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: tablesize is not in size format: " + tablesize); - - } - } else if 
(StickinessMethodType.AppCookieBased.getName().equalsIgnoreCase(stickinessPolicy.getMethodName())) { - String length = null; // optional - String holdTime = null; // optional - - for (final Pair paramKV : paramsList) { - final String key = paramKV.first(); - final String value = paramKV.second(); - if ("length".equalsIgnoreCase(key)) { - length = value; - } - if ("holdtime".equalsIgnoreCase(key)) { - holdTime = value; - } - } - - if (length != null && !containsOnlyNumbers(length, null)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: length is not a number: " + length); - } - if (holdTime != null && !containsOnlyNumbers(holdTime, timeEndChar) && !containsOnlyNumbers(holdTime, null)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: holdtime is not in timeformat: " + holdTime); - } - } - } - return true; - } - @Override public boolean validateLBRule(final Network network, final LoadBalancingRule rule) { final List rules = new ArrayList(); @@ -398,7 +298,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (routers == null || routers.isEmpty()) { return true; } - return validateHAProxyLBRule(rule); + return haProxyLBRule.validateHAProxyLBRule(rule); } return true; } @@ -412,7 +312,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply lb rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply lb rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -439,7 +339,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (canHandle(network, Service.Vpn)) { final List routers = 
_routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply vpn users on the backend; virtual router" + " doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply vpn users on the backend; virtual router" + " doesn't exist in the network " + network.getId()); return null; } @@ -448,7 +348,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { return networkTopology.applyVpnUsers(network, users, routers); } else { - s_logger.debug("Element " + getName() + " doesn't handle applyVpnUsers command"); + logger.debug("Element " + getName() + " doesn't handle applyVpnUsers command"); return null; } } @@ -463,12 +363,12 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (canHandle(network, Service.Vpn)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need stop vpn on the backend; virtual router doesn't" + " exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need stop vpn on the backend; virtual router doesn't" + " exist in the network " + network.getId()); return true; } return _routerMgr.startRemoteAccessVpn(network, vpn, routers); } else { - s_logger.debug("Element " + getName() + " doesn't handle createVpn command"); + logger.debug("Element " + getName() + " doesn't handle createVpn command"); return false; } } @@ -483,12 +383,12 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (canHandle(network, Service.Vpn)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need stop vpn on the backend; virtual router doesn't " + "exist in the network " + network.getId()); + 
logger.debug("Virtual router elemnt doesn't need stop vpn on the backend; virtual router doesn't " + "exist in the network " + network.getId()); return true; } return _routerMgr.deleteRemoteAccessVpn(network, vpn, routers); } else { - s_logger.debug("Element " + getName() + " doesn't handle removeVpn command"); + logger.debug("Element " + getName() + " doesn't handle removeVpn command"); return false; } } @@ -505,7 +405,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (canHandle) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -662,7 +562,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (canHandle(network, Service.StaticNat)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -686,11 +586,11 @@ NetworkMigrationResponder, AggregatedCommandExecutor { result = result && _routerMgr.stop(router, false, context.getCaller(), context.getAccount()) != null; if (cleanup) { if (!result) { - s_logger.warn("Failed to stop virtual router element " + router + ", but would try to process clean up anyway."); + logger.warn("Failed to stop virtual router element " + router + ", but would try to process clean up 
anyway."); } result = _routerMgr.destroyRouter(router.getId(), context.getAccount(), context.getCaller().getId()) != null; if (!result) { - s_logger.warn("Failed to clean up virtual router element " + router); + logger.warn("Failed to clean up virtual router element " + router); } } } @@ -722,7 +622,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network " + network.getId()); return true; } @@ -759,7 +659,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network " + network.getId()); return true; } @@ -778,7 +678,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network " + network.getId()); return true; } @@ -803,7 +703,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { public VirtualRouterProvider configure(final ConfigureVirtualRouterElementCmd cmd) { final VirtualRouterProviderVO element = _vrProviderDao.findById(cmd.getId()); if (element == null || !(element.getType() == Type.VirtualRouter || element.getType() == Type.VPCVirtualRouter)) { - s_logger.debug("Can't find Virtual Router element with network service provider id " + cmd.getId()); + logger.debug("Can't find Virtual Router element with network 
service provider id " + cmd.getId()); return null; } @@ -817,7 +717,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { public OvsProvider configure(final ConfigureOvsElementCmd cmd) { final OvsProviderVO element = _ovsProviderDao.findById(cmd.getId()); if (element == null) { - s_logger.debug("Can't find Ovs element with network service provider id " + cmd.getId()); + logger.debug("Can't find Ovs element with network service provider id " + cmd.getId()); return null; } @@ -834,7 +734,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { } VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(nspId, providerType); if (element != null) { - s_logger.debug("There is already a virtual router element with service provider id " + nspId); + logger.debug("There is already a virtual router element with service provider id " + nspId); return null; } element = new VirtualRouterProviderVO(nspId, providerType); @@ -847,7 +747,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (canHandle(network, Service.PortForwarding)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -951,7 +851,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { try { return _routerMgr.removeDhcpSupportForSubnet(network, routers); } catch (final ResourceUnavailableException e) { - s_logger.debug("Router resource unavailable "); + logger.debug("Router resource unavailable "); } } return false; @@ -990,7 +890,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { } if (network.getIp6Gateway() != null) { - s_logger.info("Skip password 
and userdata service setup for IPv6 VM"); + logger.info("Skip password and userdata service setup for IPv6 VM"); return true; } @@ -1117,7 +1017,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { if (schemeCaps != null) { for (final LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + getName()); + logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + getName()); return false; } } @@ -1141,7 +1041,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { try { networkTopology.setupDhcpForPvlan(false, router, router.getHostId(), nic); } catch (final ResourceUnavailableException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; @@ -1166,7 +1066,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { try { networkTopology.setupDhcpForPvlan(true, router, router.getHostId(), nic); } catch (final ResourceUnavailableException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; @@ -1190,7 +1090,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor { try { networkTopology.setupDhcpForPvlan(true, router, router.getHostId(), nic); } catch (final ResourceUnavailableException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; diff --git a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java index 078eab3d885..9e5d81ba35a 100644 --- a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java @@ -25,7 +25,6 @@ import 
javax.ejb.Local; import javax.inject.Inject; import org.apache.cloudstack.network.topology.NetworkTopology; -import org.apache.log4j.Logger; import org.cloud.network.router.deployment.RouterDeploymentDefinition; import org.cloud.network.router.deployment.RouterDeploymentDefinitionBuilder; import org.springframework.beans.factory.annotation.Autowired; @@ -83,7 +82,6 @@ import com.cloud.vm.VirtualMachineProfile; NetworkACLServiceProvider.class }) public class VpcVirtualRouterElement extends VirtualRouterElement implements VpcProvider, Site2SiteVpnServiceProvider, NetworkACLServiceProvider { - private static final Logger s_logger = Logger.getLogger(VpcVirtualRouterElement.class); private static final Map> capabilities = setCapabilities(); @@ -136,12 +134,12 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc if (service == null) { if (!_networkMdl.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); + logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); return false; } } else { if (!_networkMdl.isProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } } @@ -184,13 +182,13 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final Long vpcId = network.getVpcId(); if (vpcId == null) { - s_logger.trace("Network " + network + " is not associated with any VPC"); + logger.trace("Network " + network + " is not associated with any VPC"); return false; } final Vpc vpc = _vpcMgr.getActiveVpc(vpcId); if (vpc == null) { - s_logger.warn("Unable to find Enabled 
VPC by id " + vpcId); + logger.warn("Unable to find Enabled VPC by id " + vpcId); return false; } @@ -219,7 +217,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc protected void configureGuestNetwork(final Network network, final List routers ) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { - s_logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); + logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); for (final DomainRouterVO router : routers) { if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) { @@ -228,9 +226,9 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc paramsForRouter.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); } if (!_vpcRouterMgr.addVpcRouterToGuestNetwork(router, network, paramsForRouter)) { - s_logger.error("Failed to add VPC router " + router + " to guest network " + network); + logger.error("Failed to add VPC router " + router + " to guest network " + network); } else { - s_logger.debug("Successfully added VPC router " + router + " to guest network " + network); + logger.debug("Successfully added VPC router " + router + " to guest network " + network); } } } @@ -242,13 +240,13 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final Long vpcId = network.getVpcId(); if (vpcId == null) { - s_logger.trace("Network " + network + " is not associated with any VPC"); + logger.trace("Network " + network + " is not associated with any VPC"); return false; } final Vpc vpc = _vpcMgr.getActiveVpc(vpcId); if (vpc == null) { - s_logger.warn("Unable to find Enabled VPC by id " + vpcId); + logger.warn("Unable to find Enabled VPC by id " + vpcId); return false; } @@ -281,7 +279,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc boolean success = true; final Long vpcId = 
network.getVpcId(); if (vpcId == null) { - s_logger.debug("Network " + network + " doesn't belong to any vpc, so skipping unplug nic part"); + logger.debug("Network " + network + " doesn't belong to any vpc, so skipping unplug nic part"); return success; } @@ -289,15 +287,15 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc for (final VirtualRouter router : routers) { // 1) Check if router is already a part of the network if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) { - s_logger.debug("Router " + router + " is not a part the network " + network); + logger.debug("Router " + router + " is not a part the network " + network); continue; } // 2) Call unplugNics in the network service success = success && _vpcRouterMgr.removeVpcRouterFromGuestNetwork(router, network); if (!success) { - s_logger.warn("Failed to unplug nic in network " + network + " for virtual router " + router); + logger.warn("Failed to unplug nic in network " + network + " for virtual router " + router); } else { - s_logger.debug("Successfully unplugged nic in network " + network + " for virtual router " + router); + logger.debug("Successfully unplugged nic in network " + network + " for virtual router " + router); } } @@ -309,7 +307,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc boolean success = true; final Long vpcId = config.getVpcId(); if (vpcId == null) { - s_logger.debug("Network " + config + " doesn't belong to any vpc, so skipping unplug nic part"); + logger.debug("Network " + config + " doesn't belong to any vpc, so skipping unplug nic part"); return success; } @@ -317,15 +315,15 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc for (final VirtualRouter router : routers) { // 1) Check if router is already a part of the network if (!_networkMdl.isVmPartOfNetwork(router.getId(), config.getId())) { - s_logger.debug("Router " + router + " is not a part the network " + 
config); + logger.debug("Router " + router + " is not a part the network " + config); continue; } // 2) Call unplugNics in the network service success = success && _vpcRouterMgr.removeVpcRouterFromGuestNetwork(router, config); if (!success) { - s_logger.warn("Failed to unplug nic in network " + config + " for virtual router " + router); + logger.warn("Failed to unplug nic in network " + config + " for virtual router " + router); } else { - s_logger.debug("Successfully unplugged nic in network " + config + " for virtual router " + router); + logger.debug("Successfully unplugged nic in network " + config + " for virtual router " + router); } } @@ -349,13 +347,13 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc //For the 2nd time it returns the VPC routers. final Long vpcId = network.getVpcId(); if (vpcId == null) { - s_logger.error("Network " + network + " is not associated with any VPC"); + logger.error("Network " + network + " is not associated with any VPC"); return routers; } final Vpc vpc = _vpcMgr.getActiveVpc(vpcId); if (vpc == null) { - s_logger.warn("Unable to find Enabled VPC by id " + vpcId); + logger.warn("Unable to find Enabled VPC by id " + vpcId); return routers; } @@ -369,11 +367,11 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc try { routers = routerDeploymentDefinition.deployVirtualRouter(); } catch (final ConcurrentOperationException e) { - s_logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); + logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); } catch (final InsufficientCapacityException e) { - s_logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); + logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); } catch (final ResourceUnavailableException 
e) { - s_logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); + logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); } return routers; @@ -413,17 +411,17 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc @Override public boolean createPrivateGateway(final PrivateGateway gateway) throws ConcurrentOperationException, ResourceUnavailableException { if (gateway.getType() != VpcGateway.Type.Private) { - s_logger.warn("Type of vpc gateway is not " + VpcGateway.Type.Private); + logger.warn("Type of vpc gateway is not " + VpcGateway.Type.Private); return false; } final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - s_logger.debug(getName() + " element doesn't need to create Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); + logger.debug(getName() + " element doesn't need to create Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); return true; } - s_logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); + logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); final DataCenterVO dcVO = _dcDao.findById(gateway.getZoneId()); final NetworkTopology networkTopology = networkTopologyContext.retrieveNetworkTopology(dcVO); @@ -433,15 +431,15 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc try { final List rules = _networkACLItemDao.listByACL(gateway.getNetworkACLId()); if (!applyACLItemsToPrivateGw(gateway, rules)) { - s_logger.debug("Failed to apply network acl id " + gateway.getNetworkACLId() + " on gateway "); + logger.debug("Failed to apply network acl id " + gateway.getNetworkACLId() + " on gateway "); return false; } } catch (final Exception ex) { - 
s_logger.debug("Failed to apply network acl id " + gateway.getNetworkACLId() + " on gateway "); + logger.debug("Failed to apply network acl id " + gateway.getNetworkACLId() + " on gateway "); return false; } } else { - s_logger.debug("Failed to setup private gateway " + gateway); + logger.debug("Failed to setup private gateway " + gateway); return false; } } @@ -452,17 +450,17 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc @Override public boolean deletePrivateGateway(final PrivateGateway gateway) throws ConcurrentOperationException, ResourceUnavailableException { if (gateway.getType() != VpcGateway.Type.Private) { - s_logger.warn("Type of vpc gateway is not " + VpcGateway.Type.Private); + logger.warn("Type of vpc gateway is not " + VpcGateway.Type.Private); return false; } final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - s_logger.debug(getName() + " element doesn't need to delete Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); + logger.debug(getName() + " element doesn't need to delete Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); return true; } - s_logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); + logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); int result = 0; for (final DomainRouterVO domainRouterVO : routers) { @@ -486,7 +484,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc if (canHandle) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug(getName() + " element doesn't need to associate ip addresses on the backend; VPC virtual " + "router doesn't exist in the network " + logger.debug(getName() + " element doesn't need 
to associate ip addresses on the backend; VPC virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -505,7 +503,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc if (canHandle(network, Service.NetworkACL)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -519,7 +517,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc return true; } } catch (final Exception ex) { - s_logger.debug("Failed to apply network acl in network " + network.getId()); + logger.debug("Failed to apply network acl in network " + network.getId()); return false; } } else { @@ -536,7 +534,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc public boolean applyStaticRoutes(final Vpc vpc, final List routes) throws ResourceUnavailableException { final List routers = _routerDao.listByVpcId(vpc.getId()); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to static routes on the backend; virtual " + "router doesn't exist in the vpc " + vpc); + logger.debug("Virtual router elemnt doesn't need to static routes on the backend; virtual " + "router doesn't exist in the vpc " + vpc); return true; } @@ -546,7 +544,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc if (!networkTopology.applyStaticRoutes(routes, routers)) { throw new CloudRuntimeException("Failed to apply static routes in vpc " + vpc); } else { - s_logger.debug("Applied static routes on vpc " + vpc); + 
logger.debug("Applied static routes on vpc " + vpc); return true; } } @@ -558,7 +556,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router element doesn't need to apply network acl rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply network acl rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -579,7 +577,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final Map vpnCapabilities = capabilities.get(Service.Vpn); if (!vpnCapabilities.get(Capability.VpnTypes).contains("s2svpn")) { - s_logger.error("try to start site 2 site vpn on unsupported network element?"); + logger.error("try to start site 2 site vpn on unsupported network element?"); return false; } @@ -610,7 +608,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final Map vpnCapabilities = capabilities.get(Service.Vpn); if (!vpnCapabilities.get(Capability.VpnTypes).contains("s2svpn")) { - s_logger.error("try to stop site 2 site vpn on unsupported network element?"); + logger.error("try to stop site 2 site vpn on unsupported network element?"); return false; } @@ -643,7 +641,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - s_logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); return null; } @@ -666,7 +664,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final 
List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - s_logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); return false; } @@ -685,7 +683,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - s_logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); return false; } diff --git a/server/src/com/cloud/network/firewall/FirewallManagerImpl.java b/server/src/com/cloud/network/firewall/FirewallManagerImpl.java index b4e3bc3780d..12812676b90 100644 --- a/server/src/com/cloud/network/firewall/FirewallManagerImpl.java +++ b/server/src/com/cloud/network/firewall/FirewallManagerImpl.java @@ -27,7 +27,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.user.firewall.IListFirewallRulesCmd; @@ -100,7 +99,6 @@ import com.cloud.vm.dao.UserVmDao; @Component @Local(value = {FirewallService.class, FirewallManager.class}) public class FirewallManagerImpl extends ManagerBase implements FirewallService, FirewallManager, NetworkRuleApplier { - private static final Logger s_logger = Logger.getLogger(FirewallManagerImpl.class); @Inject FirewallRulesDao _firewallDao; @@ -156,7 +154,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, @Override public boolean start() { - s_logger.info("Firewall provider list is " + _firewallElements.iterator().next()); + logger.info("Firewall 
provider list is " + _firewallElements.iterator().next()); return super.start(); } @@ -437,8 +435,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No network rule conflicts detected for " + newRule + " against " + (rules.size() - 1) + " existing rules"); + if (logger.isDebugEnabled()) { + logger.debug("No network rule conflicts detected for " + newRule + " against " + (rules.size() - 1) + " existing rules"); } } @@ -525,12 +523,12 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, public boolean applyRules(List rules, boolean continueOnError, boolean updateRulesInDB) throws ResourceUnavailableException { boolean success = true; if (rules == null || rules.size() == 0) { - s_logger.debug("There are no rules to forward to the network elements"); + logger.debug("There are no rules to forward to the network elements"); return true; } Purpose purpose = rules.get(0).getPurpose(); if (!_ipAddrMgr.applyRules(rules, purpose, this, continueOnError)) { - s_logger.warn("Rules are not completely applied"); + logger.warn("Rules are not completely applied"); return false; } else { if (updateRulesInDB) { @@ -538,7 +536,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, if (rule.getState() == FirewallRule.State.Revoke) { FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(rule.getId()); if (relatedRule != null) { - s_logger.warn("Can't remove the firewall rule id=" + rule.getId() + " as it has related firewall rule id=" + relatedRule.getId() + + logger.warn("Can't remove the firewall rule id=" + rule.getId() + " as it has related firewall rule id=" + relatedRule.getId() + "; leaving it in Revoke state"); success = false; } else { @@ -605,7 +603,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, break;*/ default: assert (false) : "Unexpected fall through in applying rules to the 
network elements"; - s_logger.error("FirewallManager cannot process rules of type " + purpose); + logger.error("FirewallManager cannot process rules of type " + purpose); throw new CloudRuntimeException("FirewallManager cannot process rules of type " + purpose); } return handled; @@ -641,7 +639,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, public boolean applyFirewallRules(List rules, boolean continueOnError, Account caller) { if (rules.size() == 0) { - s_logger.debug("There are no firewall rules to apply"); + logger.debug("There are no firewall rules to apply"); return true; } @@ -659,7 +657,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply firewall rules due to : "+ ex.getMessage()); + logger.warn("Failed to apply firewall rules due to : "+ ex.getMessage()); return false; } @@ -669,7 +667,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, @Override public boolean applyDefaultEgressFirewallRule(Long networkId, boolean defaultPolicy, boolean add) throws ResourceUnavailableException { - s_logger.debug("applying default firewall egress rules "); + logger.debug("applying default firewall egress rules "); NetworkVO network = _networkDao.findById(networkId); List sourceCidr = new ArrayList(); @@ -688,7 +686,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply default egress rules for guest network due to ", ex); + logger.warn("Failed to apply default egress rules for guest network due to ", ex); return false; } return true; @@ -804,8 +802,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, boolean generateUsageEvent = false; if (rule.getState() == State.Staged) { - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Found a rule that is still in stage state so just removing it: " + rule); + if (logger.isDebugEnabled()) { + logger.debug("Found a rule that is still in stage state so just removing it: " + rule); } removeRule(rule); generateUsageEvent = true; @@ -834,8 +832,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, List rules = new ArrayList(); List fwRules = _firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.Firewall); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + fwRules.size() + " firewall rules for ip id=" + ipId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + fwRules.size() + " firewall rules for ip id=" + ipId); } for (FirewallRuleVO rule : fwRules) { @@ -851,8 +849,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, // Now we check again in case more rules have been inserted. rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.Firewall)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully released firewall rules for ip id=" + ipId + " and # of rules now = " + rules.size()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully released firewall rules for ip id=" + ipId + " and # of rules now = " + rules.size()); } return rules.size() == 0; @@ -881,8 +879,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, List rules = new ArrayList(); List fwRules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.Firewall); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + fwRules.size() + " firewall rules for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + fwRules.size() + " firewall rules for network id=" + networkId); } for (FirewallRuleVO rule : fwRules) { @@ -898,8 +896,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, // Now we check again in case more rules 
have been inserted. rules.addAll(_firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.Firewall)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully released firewall rules for network id=" + networkId + " and # of rules now = " + rules.size()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully released firewall rules for network id=" + networkId + " and # of rules now = " + rules.size()); } return success && rules.size() == 0; @@ -910,11 +908,11 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, FirewallRule fwRule = _firewallDao.findByRelatedId(ruleId); if (fwRule == null) { - s_logger.trace("No related firewall rule exists for rule id=" + ruleId + " so returning true here"); + logger.trace("No related firewall rule exists for rule id=" + ruleId + " so returning true here"); return true; } - s_logger.debug("Revoking Firewall rule id=" + fwRule.getId() + " as a part of rule delete id=" + ruleId + " with apply=" + apply); + logger.debug("Revoking Firewall rule id=" + fwRule.getId() + " as a part of rule delete id=" + ruleId + " with apply=" + apply); return revokeIngressFirewallRule(fwRule.getId(), apply); } @@ -950,10 +948,10 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, Set ipsToReprogram = new HashSet(); if (firewallRules.isEmpty()) { - s_logger.debug("No firewall rules are found for vm id=" + vmId); + logger.debug("No firewall rules are found for vm id=" + vmId); return true; } else { - s_logger.debug("Found " + firewallRules.size() + " to cleanup for vm id=" + vmId); + logger.debug("Found " + firewallRules.size() + " to cleanup for vm id=" + vmId); } for (FirewallRuleVO rule : firewallRules) { @@ -964,11 +962,11 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, // apply rules for all ip addresses for (Long ipId : ipsToReprogram) { - s_logger.debug("Applying firewall rules for ip address id=" + ipId + " as a part of 
vm expunge"); + logger.debug("Applying firewall rules for ip address id=" + ipId + " as a part of vm expunge"); try { success = success && applyIngressFirewallRules(ipId, _accountMgr.getSystemAccount()); } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply port forwarding rules for ip id=" + ipId); + logger.warn("Failed to apply port forwarding rules for ip id=" + ipId); success = false; } } @@ -988,7 +986,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, createFirewallRule(ip.getId(), acct, rule.getXid(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), rule.getSourceCidrList(), rule.getIcmpCode(), rule.getIcmpType(), rule.getRelated(), FirewallRuleType.System, rule.getNetworkId(), rule.getTrafficType(), true); } catch (Exception e) { - s_logger.debug("Failed to add system wide firewall rule, due to:" + e.toString()); + logger.debug("Failed to add system wide firewall rule, due to:" + e.toString()); } } return true; diff --git a/server/src/com/cloud/network/guru/ControlNetworkGuru.java b/server/src/com/cloud/network/guru/ControlNetworkGuru.java index 5e80bea0a17..0392e99ae1e 100644 --- a/server/src/com/cloud/network/guru/ControlNetworkGuru.java +++ b/server/src/com/cloud/network/guru/ControlNetworkGuru.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.dc.DataCenter; @@ -55,7 +54,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = {NetworkGuru.class}) public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(ControlNetworkGuru.class); @Inject DataCenterDao _dcDao; @Inject @@ -86,7 +84,7 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu if 
(offering.isSystemOnly() && isMyTrafficType(offering.getTrafficType())) { return true; } else { - s_logger.trace("We only care about System only Control network"); + logger.trace("We only care about System only Control network"); return false; } } @@ -178,14 +176,14 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu DataCenterVO dcVo = _dcDao.findById(dcId); if (dcVo.getNetworkType() != NetworkType.Basic) { super.release(nic, vm, reservationId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Released nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Released nic: " + nic); } return true; } else { nic.deallocate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Released nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Released nic: " + nic); } return true; } @@ -194,8 +192,8 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu _dcDao.releaseLinkLocalIpAddress(nic.getId(), reservationId); nic.deallocate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Released nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Released nic: " + nic); } return true; @@ -233,7 +231,7 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu _gateway = NetUtils.getLinkLocalGateway(); } - s_logger.info("Control network setup: cidr=" + _cidr + "; gateway = " + _gateway); + logger.info("Control network setup: cidr=" + _cidr + "; gateway = " + _gateway); return true; } diff --git a/server/src/com/cloud/network/guru/DirectNetworkGuru.java b/server/src/com/cloud/network/guru/DirectNetworkGuru.java index 9686f8087dd..237d03d5cad 100644 --- a/server/src/com/cloud/network/guru/DirectNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectNetworkGuru.java @@ -22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; 
import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -74,7 +73,6 @@ import com.cloud.vm.dao.NicSecondaryIpDao; @Local(value = {NetworkGuru.class}) public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(DirectNetworkGuru.class); @Inject DataCenterDao _dcDao; @@ -121,7 +119,7 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { if (dc.getNetworkType() == NetworkType.Advanced && isMyTrafficType(offering.getTrafficType()) && offering.getGuestType() == GuestType.Shared) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Shared); + logger.trace("We only take care of Guest networks of type " + GuestType.Shared); return false; } } @@ -253,7 +251,7 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { if (vm.getType() == VirtualMachine.Type.DomainRouter) { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic == null) { - s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIPv4Address() + " and ipv6 address " + nic.getIPv6Address() + + logger.debug("Saving placeholder nic with ip4 address " + nic.getIPv4Address() + " and ipv6 address " + nic.getIPv6Address() + " for the network " + network); _networkMgr.savePlaceholderNic(network, nic.getIPv4Address(), nic.getIPv6Address(), VirtualMachine.Type.DomainRouter); } @@ -282,8 +280,8 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { @Override @DB public void deallocate(final Network network, final NicProfile nic, VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + if (logger.isDebugEnabled()) { + logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); } if (nic.getIPv4Address() 
!= null) { @@ -295,14 +293,14 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { // if the ip address a part of placeholder, don't release it Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic != null && placeholderNic.getIPv4Address().equalsIgnoreCase(ip.getAddress().addr())) { - s_logger.debug("Not releasing direct ip " + ip.getId() + " yet as its ip is saved in the placeholder"); + logger.debug("Not releasing direct ip " + ip.getId() + " yet as its ip is saved in the placeholder"); } else { _ipAddrMgr.markIpAsUnavailable(ip.getId()); _ipAddressDao.unassignIpAddress(ip.getId()); } //unassign nic secondary ip address - s_logger.debug("remove nic " + nic.getId() + " secondary ip "); + logger.debug("remove nic " + nic.getId() + " secondary ip "); List nicSecIps = null; nicSecIps = _nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); for (String secIp : nicSecIps) { @@ -338,12 +336,12 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { public void doInTransactionWithoutResult(TransactionStatus status) { for (Nic nic : nics) { if (nic.getIPv4Address() != null) { - s_logger.debug("Releasing ip " + nic.getIPv4Address() + " of placeholder nic " + nic); + logger.debug("Releasing ip " + nic.getIPv4Address() + " of placeholder nic " + nic); IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); if (ip != null) { _ipAddrMgr.markIpAsUnavailable(ip.getId()); _ipAddressDao.unassignIpAddress(ip.getId()); - s_logger.debug("Removing placeholder nic " + nic); + logger.debug("Removing placeholder nic " + nic); _nicDao.remove(nic.getId()); } } @@ -353,7 +351,7 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { } return true; }catch (Exception e) { - s_logger.error("trash. Exception:" + e.getMessage()); + logger.error("trash. Exception:" + e.getMessage()); throw new CloudRuntimeException("trash. 
Exception:" + e.getMessage(),e); } } diff --git a/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java b/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java index 26c80151add..71f7374a868 100644 --- a/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java @@ -22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.configuration.ZoneConfig; import com.cloud.dc.DataCenter; @@ -64,7 +63,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkGuru.class) public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { - private static final Logger s_logger = Logger.getLogger(DirectPodBasedNetworkGuru.class); @Inject DataCenterDao _dcDao; @@ -87,7 +85,7 @@ public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { if (dc.getNetworkType() == NetworkType.Basic && isMyTrafficType(offering.getTrafficType())) { return true; } else { - s_logger.trace("We only take care of Guest Direct Pod based networks"); + logger.trace("We only take care of Guest Direct Pod based networks"); return false; } } @@ -184,7 +182,7 @@ public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { if (placeholderNic != null) { IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), placeholderNic.getIPv4Address()); ip = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); - s_logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network + + logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network + " and gateway " + podRangeGateway); } } @@ -209,7 +207,7 @@ public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { if 
(vm.getType() == VirtualMachine.Type.DomainRouter) { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, pod.getId()); if (placeholderNic == null) { - s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIPv4Address() + " for the network " + network); + logger.debug("Saving placeholder nic with ip4 address " + nic.getIPv4Address() + " for the network " + network); _networkMgr.savePlaceholderNic(network, nic.getIPv4Address(), null, VirtualMachine.Type.DomainRouter); } } diff --git a/server/src/com/cloud/network/guru/ExternalGuestNetworkGuru.java b/server/src/com/cloud/network/guru/ExternalGuestNetworkGuru.java index b1d123669cc..f0739ca9e90 100644 --- a/server/src/com/cloud/network/guru/ExternalGuestNetworkGuru.java +++ b/server/src/com/cloud/network/guru/ExternalGuestNetworkGuru.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -67,7 +66,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkGuru.class) public class ExternalGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(ExternalGuestNetworkGuru.class); @Inject NetworkOrchestrationService _networkMgr; @Inject @@ -98,7 +96,7 @@ public class ExternalGuestNetworkGuru extends GuestNetworkGuru { isMyIsolationMethod(physicalNetwork) && !offering.isSystemOnly()) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } diff --git a/server/src/com/cloud/network/guru/GuestNetworkGuru.java 
b/server/src/com/cloud/network/guru/GuestNetworkGuru.java index 92613c47482..4929335bd43 100644 --- a/server/src/com/cloud/network/guru/GuestNetworkGuru.java +++ b/server/src/com/cloud/network/guru/GuestNetworkGuru.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationSe import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.dc.DataCenter; @@ -84,7 +83,6 @@ import com.cloud.vm.dao.NicDao; @Local(value = NetworkGuru.class) public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGuru, Configurable { - private static final Logger s_logger = Logger.getLogger(GuestNetworkGuru.class); @Inject protected VpcDao _vpcDao; @@ -161,7 +159,7 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur } if (methods.isEmpty()) { // The empty isolation method is assumed to be VLAN - s_logger.debug("Empty physical isolation type for physical network " + physicalNetwork.getUuid()); + logger.debug("Empty physical isolation type for physical network " + physicalNetwork.getUuid()); methods = new ArrayList(1); methods.add("VLAN".toLowerCase()); } @@ -233,8 +231,8 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur @DB public void deallocate(final Network network, final NicProfile nic, final VirtualMachineProfile vm) { if (network.getSpecifyIpRanges()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + if (logger.isDebugEnabled()) { + logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); } final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); @@ -430,7 
+428,7 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur } if ((profile.getBroadcastDomainType() == BroadcastDomainType.Vlan || profile.getBroadcastDomainType() == BroadcastDomainType.Vxlan) && !offering.getSpecifyVlan()) { - s_logger.debug("Releasing vnet for the network id=" + profile.getId()); + logger.debug("Releasing vnet for the network id=" + profile.getId()); _dcDao.releaseVnet(BroadcastDomainType.getValue(profile.getBroadcastUri()), profile.getDataCenterId(), profile.getPhysicalNetworkId(), profile.getAccountId(), profile.getReservationId()); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), profile.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_ZONE_VLAN_RELEASE, diff --git a/server/src/com/cloud/network/guru/PodBasedNetworkGuru.java b/server/src/com/cloud/network/guru/PodBasedNetworkGuru.java index 2470ea7c67b..249cea568a2 100644 --- a/server/src/com/cloud/network/guru/PodBasedNetworkGuru.java +++ b/server/src/com/cloud/network/guru/PodBasedNetworkGuru.java @@ -21,7 +21,6 @@ import java.util.Random; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.dc.Pod; import com.cloud.dc.dao.DataCenterDao; @@ -50,7 +49,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = {NetworkGuru.class}) public class PodBasedNetworkGuru extends AdapterBase implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(PodBasedNetworkGuru.class); @Inject DataCenterDao _dcDao; @Inject @@ -134,7 +132,7 @@ public class PodBasedNetworkGuru extends AdapterBase implements NetworkGuru { nic.setBroadcastUri(null); nic.setIsolationUri(null); - s_logger.debug("Allocated a nic " + nic + " for " + vm); + logger.debug("Allocated a nic " + nic + " for " + vm); } @Override @@ -151,8 +149,8 @@ public class PodBasedNetworkGuru extends AdapterBase implements NetworkGuru { nic.deallocate(); - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Released nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Released nic: " + nic); } return true; diff --git a/server/src/com/cloud/network/guru/PrivateNetworkGuru.java b/server/src/com/cloud/network/guru/PrivateNetworkGuru.java index 340c8b12524..5aaf15511a8 100644 --- a/server/src/com/cloud/network/guru/PrivateNetworkGuru.java +++ b/server/src/com/cloud/network/guru/PrivateNetworkGuru.java @@ -19,7 +19,6 @@ package com.cloud.network.guru; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.DataCenter; @@ -56,7 +55,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkGuru.class) public class PrivateNetworkGuru extends AdapterBase implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(PrivateNetworkGuru.class); @Inject protected ConfigurationManager _configMgr; @Inject @@ -93,7 +91,7 @@ public class PrivateNetworkGuru extends AdapterBase implements NetworkGuru { offering.isSystemOnly()) { return true; } else { - s_logger.trace("We only take care of system Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of system Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -140,8 +138,8 @@ public class PrivateNetworkGuru extends AdapterBase implements NetworkGuru { @Override public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + if (logger.isDebugEnabled()) { + logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); } PrivateIpVO ip = _privateIpDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); diff --git 
a/server/src/com/cloud/network/guru/PublicNetworkGuru.java b/server/src/com/cloud/network/guru/PublicNetworkGuru.java index 9f855ea116d..219ba9a51c9 100644 --- a/server/src/com/cloud/network/guru/PublicNetworkGuru.java +++ b/server/src/com/cloud/network/guru/PublicNetworkGuru.java @@ -20,7 +20,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.Vlan.VlanType; @@ -60,7 +59,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = {NetworkGuru.class}) public class PublicNetworkGuru extends AdapterBase implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(PublicNetworkGuru.class); @Inject DataCenterDao _dcDao; @@ -197,8 +195,8 @@ public class PublicNetworkGuru extends AdapterBase implements NetworkGuru { @Override @DB public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("public network deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + if (logger.isDebugEnabled()) { + logger.debug("public network deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); } final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); @@ -213,8 +211,8 @@ public class PublicNetworkGuru extends AdapterBase implements NetworkGuru { } nic.deallocate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocated nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Deallocated nic: " + nic); } } diff --git a/server/src/com/cloud/network/guru/StorageNetworkGuru.java b/server/src/com/cloud/network/guru/StorageNetworkGuru.java index bf21cd77160..5f34a363edd 100644 --- a/server/src/com/cloud/network/guru/StorageNetworkGuru.java +++ 
b/server/src/com/cloud/network/guru/StorageNetworkGuru.java @@ -19,7 +19,6 @@ package com.cloud.network.guru; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.dc.Pod; import com.cloud.dc.StorageNetworkIpAddressVO; @@ -46,7 +45,6 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value = NetworkGuru.class) public class StorageNetworkGuru extends PodBasedNetworkGuru implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(StorageNetworkGuru.class); @Inject StorageNetworkManager _sNwMgr; @Inject @@ -77,7 +75,7 @@ public class StorageNetworkGuru extends PodBasedNetworkGuru implements NetworkGu if (isMyTrafficType(offering.getTrafficType()) && offering.isSystemOnly()) { return true; } else { - s_logger.trace("It's not storage network offering, skip it."); + logger.trace("It's not storage network offering, skip it."); return false; } } @@ -144,7 +142,7 @@ public class StorageNetworkGuru extends PodBasedNetworkGuru implements NetworkGu nic.setBroadcastUri(null); } nic.setIsolationUri(null); - s_logger.debug("Allocated a storage nic " + nic + " for " + vm); + logger.debug("Allocated a storage nic " + nic + " for " + vm); } @Override @@ -155,7 +153,7 @@ public class StorageNetworkGuru extends PodBasedNetworkGuru implements NetworkGu } _sNwMgr.releaseIpAddress(nic.getIPv4Address()); - s_logger.debug("Release an storage ip " + nic.getIPv4Address()); + logger.debug("Release an storage ip " + nic.getIPv4Address()); nic.deallocate(); return true; } diff --git a/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java b/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java index 84ac061b40c..3e208599235 100644 --- a/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java +++ b/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java @@ -27,7 +27,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; 
import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -44,7 +43,6 @@ import com.cloud.utils.concurrency.NamedThreadFactory; @Component @Local(value = {LBHealthCheckManager.class}) public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthCheckManager, Manager { - private static final Logger s_logger = Logger.getLogger(LBHealthCheckManagerImpl.class); @Inject ConfigurationDao _configDao; @@ -60,8 +58,8 @@ public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthChe @Override public boolean configure(String name, Map params) throws ConfigurationException { _configs = _configDao.getConfiguration("management-server", params); - if (s_logger.isInfoEnabled()) { - s_logger.info(format("Configuring LBHealthCheck Manager %1$s", name)); + if (logger.isInfoEnabled()) { + logger.info(format("Configuring LBHealthCheck Manager %1$s", name)); } this.name = name; _interval = NumbersUtil.parseLong(_configs.get(Config.LBHealthCheck.key()), 600); @@ -71,14 +69,14 @@ public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthChe @Override public boolean start() { - s_logger.debug("LB HealthCheckmanager is getting Started"); + logger.debug("LB HealthCheckmanager is getting Started"); _executor.scheduleAtFixedRate(new UpdateLBHealthCheck(), 10, _interval, TimeUnit.SECONDS); return true; } @Override public boolean stop() { - s_logger.debug("HealthCheckmanager is getting Stopped"); + logger.debug("HealthCheckmanager is getting Stopped"); _executor.shutdown(); return true; } @@ -95,7 +93,7 @@ public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthChe updateLBHealthCheck(Scheme.Public); updateLBHealthCheck(Scheme.Internal); } catch (Exception e) { - s_logger.error("Exception in LB HealthCheck Update Checker", e); + logger.error("Exception in LB HealthCheck Update Checker", e); } } } @@ -105,9 +103,9 @@ public class LBHealthCheckManagerImpl extends 
ManagerBase implements LBHealthChe try { _lbService.updateLBHealthChecks(scheme); } catch (ResourceUnavailableException e) { - s_logger.debug("Error while updating the LB HealtCheck ", e); + logger.debug("Error while updating the LB HealtCheck ", e); } - s_logger.debug("LB HealthCheck Manager is running and getting the updates from LB providers and updating service status"); + logger.debug("LB HealthCheck Manager is running and getting the updates from LB providers and updating service status"); } } diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index c03af2d3fad..5ae741d2ecb 100644 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationSe import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.lb.ApplicationLoadBalancerRuleVO; import org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDao; -import org.apache.log4j.Logger; import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.ConfigurationManager; @@ -170,7 +169,6 @@ import com.google.gson.reflect.TypeToken; @Local(value = {LoadBalancingRulesManager.class, LoadBalancingRulesService.class}) public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancingRulesManager, LoadBalancingRulesService { - private static final Logger s_logger = Logger.getLogger(LoadBalancingRulesManagerImpl.class); @Inject NetworkOrchestrationService _networkMgr; @@ -318,7 +316,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements DataCenter zone = _entityMgr.findById(DataCenter.class, vmGroup.getZoneId()); if (zone == null) { // This should never happen, but still a cautious check - s_logger.warn("Unable to find zone while packaging AutoScale 
Vm Group, zoneid: " + vmGroup.getZoneId()); + logger.warn("Unable to find zone while packaging AutoScale Vm Group, zoneid: " + vmGroup.getZoneId()); throw new InvalidParameterValueException("Unable to find zone"); } else { if (zone.getNetworkType() == NetworkType.Advanced) { @@ -364,7 +362,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements List rules = Arrays.asList(rule); if (!applyLbRules(rules, false)) { - s_logger.debug("LB rules' autoscale config are not completely applied"); + logger.debug("LB rules' autoscale config are not completely applied"); return false; } @@ -403,16 +401,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { success = applyAutoScaleConfig(loadBalancer, vmGroup, currentState); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavaliable:", e); + logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavaliable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating AutoscaleVmGroup"); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating AutoscaleVmGroup"); } throw e; } finally { if (!success) { - s_logger.warn("Failed to configure LB Auto Scale Vm Group with Id:" + vmGroupid); + logger.warn("Failed to configure LB Auto Scale Vm Group with Id:" + vmGroupid); } } @@ -422,15 +420,15 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override public void doInTransactionWithoutResult(TransactionStatus status) { loadBalancer.setState(FirewallRule.State.Active); - s_logger.debug("LB rule " + loadBalancer.getId() + " state is set to Active"); + logger.debug("LB rule " + 
loadBalancer.getId() + " state is set to Active"); _lbDao.persist(loadBalancer); vmGroup.setState(AutoScaleVmGroup.State_Enabled); _autoScaleVmGroupDao.persist(vmGroup); - s_logger.debug("LB Auto Scale Vm Group with Id: " + vmGroupid + " is set to Enabled state."); + logger.debug("LB Auto Scale Vm Group with Id: " + vmGroupid + " is set to Enabled state."); } }); } - s_logger.info("Successfully configured LB Autoscale Vm Group with Id: " + vmGroupid); + logger.info("Successfully configured LB Autoscale Vm Group with Id: " + vmGroupid); } return success; } @@ -637,7 +635,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Network network = _networkDao.findById(lbRule.getNetworkId()); Purpose purpose = lbRule.getPurpose(); if (purpose != Purpose.LoadBalancing) { - s_logger.debug("Unable to validate network rules for purpose: " + purpose.toString()); + logger.debug("Unable to validate network rules for purpose: " + purpose.toString()); return false; } for (LoadBalancingServiceProvider ne : _lbProviders) { @@ -675,12 +673,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavaliable:", e); + logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavaliable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); deleteLBStickinessPolicy(cmd.getEntityId(), false); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); } else { deleteLBStickinessPolicy(cmd.getEntityId(), false); if (oldStickinessPolicyId != 0) { @@ 
-691,7 +689,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (backupState.equals(FirewallRule.State.Active)) applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e1) { - s_logger.info("[ignored] applying load balancer config.", e1); + logger.info("[ignored] applying load balancer config.", e1); } finally { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); @@ -720,11 +718,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavaliable:", e); + logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavaliable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating healthcheck policy"); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating healthcheck policy"); } deleteLBHealthCheckPolicy(cmd.getEntityId(), false); success = false; @@ -760,11 +758,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean backupStickyState = stickinessPolicy.isRevoke(); stickinessPolicy.setRevoke(true); _lb2stickinesspoliciesDao.persist(stickinessPolicy); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + stickinessPolicyId); + logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + stickinessPolicyId); try { if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); + 
logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); } } catch (ResourceUnavailableException e) { @@ -773,9 +771,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lb2stickinesspoliciesDao.persist(stickinessPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId); } - s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); success = false; } } else { @@ -813,7 +811,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean backupStickyState = healthCheckPolicy.isRevoke(); healthCheckPolicy.setRevoke(true); _lb2healthcheckDao.persist(healthCheckPolicy); - s_logger.debug("Set health check policy to revoke for loadbalancing rule id : " + loadBalancerId + ", healthCheckpolicyID " + healthCheckPolicyId); + logger.debug("Set health check policy to revoke for loadbalancing rule id : " + loadBalancerId + ", healthCheckpolicyID " + healthCheckPolicyId); // removing the state of services set by the monitor. 
final List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); @@ -821,7 +819,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - s_logger.debug("Resetting health state policy for services in loadbalancing rule id : " + loadBalancerId); + logger.debug("Resetting health state policy for services in loadbalancing rule id : " + loadBalancerId); for (LoadBalancerVMMapVO map : maps) { map.setState(null); _lb2VmMapDao.persist(map); @@ -832,7 +830,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); + logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); } } catch (ResourceUnavailableException e) { @@ -841,9 +839,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lb2healthcheckDao.persist(healthCheckPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting healthcheck policy: " + healthCheckPolicyId); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting healthcheck policy: " + healthCheckPolicyId); } - s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); success = false; } } else { @@ -867,7 +865,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (capability != null && 
capability.equalsIgnoreCase("true")) { /* - * s_logger.debug( + * logger.debug( * "HealthCheck Manager :: LB Provider in the Network has the Healthcheck policy capability :: " * + provider.get(0).getName()); */ @@ -903,7 +901,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (dstIp.equalsIgnoreCase(lbto.getDestinations()[i].getDestIp())) { lbVmMap.setState(des.getMonitorState()); _lb2VmMapDao.persist(lbVmMap); - s_logger.debug("Updating the LB VM Map table with the service state"); + logger.debug("Updating the LB VM Map table with the service state"); } } } @@ -917,7 +915,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } } else { - // s_logger.debug("HealthCheck Manager :: LB Provider in the Network DNOT the Healthcheck policy capability "); + // logger.debug("HealthCheck Manager :: LB Provider in the Network DNOT the Healthcheck policy capability "); } } } @@ -1079,8 +1077,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements vmIdIpMap.put(instanceId, vmIpsList); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding " + vm + " to the load balancer pool"); + if (logger.isDebugEnabled()) { + logger.debug("Adding " + vm + " to the load balancer pool"); } vmsToAdd.add(vm); } @@ -1116,7 +1114,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements applyLoadBalancerConfig(loadBalancerId); success = true; } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); success = false; } finally { if (!success) { @@ -1131,7 +1129,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements }); if (!vmInstanceIds.isEmpty()) { _lb2VmMapDao.remove(loadBalancer.getId(), vmInstanceIds, null); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching 
VM: " + vmInstanceIds); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + vmInstanceIds); } loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); @@ -1150,7 +1148,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override public boolean assignSSLCertToLoadBalancerRule(Long lbId, String certName, String publicCert, String privateKey) { - s_logger.error("Calling the manager for LB"); + logger.error("Calling the manager for LB"); LoadBalancerVO loadBalancer = _lbDao.findById(lbId); return false; //TODO @@ -1171,7 +1169,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements SslCertVO certVO = _entityMgr.findById(SslCertVO.class, lbCertMap.getCertId()); if (certVO == null) { - s_logger.warn("Cert rule with cert ID " + lbCertMap.getCertId() + " but Cert is not found"); + logger.warn("Cert rule with cert ID " + lbCertMap.getCertId() + " but Cert is not found"); return null; } @@ -1233,9 +1231,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lbDao.persist(loadBalancer); LoadBalancerCertMapVO certMap = _lbCertMapDao.findByLbRuleId(lbRuleId); _lbCertMapDao.remove(certMap.getId()); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while adding cert"); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while adding cert"); } - s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); } return success; } @@ -1269,7 +1267,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lbCertMapDao.persist(lbCertMap); if (!applyLoadBalancerConfig(lbRuleId)) { - s_logger.warn("Failed to remove cert from load balancer rule id " + lbRuleId); + logger.warn("Failed to remove cert from load balancer rule id " + lbRuleId); CloudRuntimeException ex = new 
CloudRuntimeException("Failed to remove certificate load balancer rule id " + lbRuleId); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; @@ -1281,9 +1279,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lbCertMapDao.persist(lbCertMap); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("Rolled back certificate removal lb id " + lbRuleId); + logger.debug("Rolled back certificate removal lb id " + lbRuleId); } - s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); if (!success) { CloudRuntimeException ex = new CloudRuntimeException("Failed to remove certificate from load balancer rule id " + lbRuleId); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); @@ -1347,7 +1345,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements lbvm.setRevoke(true); _lb2VmMapDao.persist(lbvm); } - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + instanceId); + logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + instanceId); } else { for (String vmIp: lbVmIps) { @@ -1358,7 +1356,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + + logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + instanceId + ", vmip " + vmIp); } } @@ -1374,7 +1372,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + instanceIds); + logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + 
instanceIds); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + instanceIds); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; @@ -1390,13 +1388,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmId(loadBalancerId, instanceId); map.setRevoke(false); _lb2VmMapDao.persist(map); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + instanceId); + logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + instanceId); }else { for (String vmIp: lbVmIps) { LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmIdVmIp (loadBalancerId, instanceId, vmIp); map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + + logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + instanceId + ", vmip " + vmIp); } } @@ -1404,9 +1402,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + " while removing vm instances"); + logger.debug("LB Rollback rule id: " + loadBalancerId + " while removing vm instances"); } - s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); } if (!success) { CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + vmIds); @@ -1438,7 +1436,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + instanceId); + 
logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + instanceId); } // Reapply all lbs that had the vm assigned @@ -1492,8 +1490,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean generateUsageEvent = false; if (lb.getState() == FirewallRule.State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a rule that is still in stage state so just removing it: " + lb); + if (logger.isDebugEnabled()) { + logger.debug("Found a rule that is still in stage state so just removing it: " + lb); } generateUsageEvent = true; } else if (lb.getState() == FirewallRule.State.Add || lb.getState() == FirewallRule.State.Active) { @@ -1507,7 +1505,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LoadBalancerVMMapVO map : maps) { map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + map.getInstanceId()); + logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + map.getInstanceId()); } } @@ -1539,7 +1537,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (apply) { try { if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Unable to apply the load balancer config"); + logger.warn("Unable to apply the load balancer config"); return false; } } catch (ResourceUnavailableException e) { @@ -1547,14 +1545,14 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (backupMaps != null) { for (LoadBalancerVMMapVO map : backupMaps) { _lb2VmMapDao.persist(map); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + ", vmId " + map.getInstanceId()); + logger.debug("LB Rollback rule id: " + loadBalancerId + ", vmId " + map.getInstanceId()); } } lb.setState(backupState); _lbDao.persist(lb); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + " while deleting LB rule."); + 
logger.debug("LB Rollback rule id: " + loadBalancerId + " while deleting LB rule."); } else { - s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); } return false; } @@ -1562,7 +1560,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(lb.getId()); if (relatedRule != null) { - s_logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + + logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + "; leaving it in Revoke state"); return false; } else { @@ -1574,7 +1572,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // Bug CS-15411 opened to document this // _elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); - s_logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); + logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); return true; } @@ -1628,7 +1626,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // set networkId just for verification purposes _networkModel.checkIpForService(ipVO, Service.Lb, networkId); - s_logger.debug("The ip is not associated with the VPC network id=" + networkId + " so assigning"); + logger.debug("The ip is not associated with the VPC network id=" + networkId + " so assigning"); ipVO = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); performedIpAssoc = true; } @@ -1643,7 +1641,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements result = createPublicLoadBalancer(xId, name, description, srcPortStart, defPortStart, ipVO.getId(), protocol, algorithm, openFirewall, CallContext.current(), lbProtocol, forDisplay); } catch 
(Exception ex) { - s_logger.warn("Failed to create load balancer due to ", ex); + logger.warn("Failed to create load balancer due to ", ex); if (ex instanceof NetworkRuleConflictException) { throw (NetworkRuleConflictException)ex; } @@ -1654,7 +1652,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } finally { if (result == null && systemIp != null) { - s_logger.debug("Releasing system IP address " + systemIp + " as corresponding lb rule failed to create"); + logger.debug("Releasing system IP address " + systemIp + " as corresponding lb rule failed to create"); _ipAddrMgr.handleSystemIpRelease(systemIp); } // release ip address if ipassoc was perfored @@ -1765,7 +1763,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - s_logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + srcPort + ", private port " + destPort + + logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + srcPort + ", private port " + destPort + " is added successfully."); CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), ipAddr.getDataCenterId(), newRule.getId(), @@ -1807,8 +1805,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override public boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException { List lbs = _lbDao.listByNetworkIdAndScheme(networkId, scheme); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Revoking " + lbs.size() + " " + scheme + " load balancing rules for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Revoking " + lbs.size() + " " + scheme + " 
load balancing rules for network id=" + networkId); } if (lbs != null) { for (LoadBalancerVO lb : lbs) { // called during restart, not persisting state in db @@ -1816,7 +1814,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } return applyLoadBalancerRules(lbs, false); // called during restart, not persisting state in db } else { - s_logger.info("Network id=" + networkId + " doesn't have load balancer rules, nothing to revoke"); + logger.info("Network id=" + networkId + " doesn't have load balancer rules, nothing to revoke"); return true; } } @@ -1825,10 +1823,10 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements public boolean applyLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException { List lbs = _lbDao.listByNetworkIdAndScheme(networkId, scheme); if (lbs != null) { - s_logger.debug("Applying load balancer rules of scheme " + scheme + " in network id=" + networkId); + logger.debug("Applying load balancer rules of scheme " + scheme + " in network id=" + networkId); return applyLoadBalancerRules(lbs, true); } else { - s_logger.info("Network id=" + networkId + " doesn't have load balancer rules of scheme " + scheme + ", nothing to apply"); + logger.info("Network id=" + networkId + " doesn't have load balancer rules of scheme " + scheme + ", nothing to apply"); return true; } } @@ -1878,7 +1876,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (!applyLbRules(rules, false)) { - s_logger.debug("LB rules are not completely applied"); + logger.debug("LB rules are not completely applied"); return false; } @@ -1891,11 +1889,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (lb.getState() == FirewallRule.State.Revoke) { removeLBRule(lb); - s_logger.debug("LB " + lb.getId() + " is successfully removed"); + logger.debug("LB " + lb.getId() + " is successfully removed"); checkForReleaseElasticIp = true; } else if 
(lb.getState() == FirewallRule.State.Add) { lb.setState(FirewallRule.State.Active); - s_logger.debug("LB rule " + lb.getId() + " state is set to Active"); + logger.debug("LB rule " + lb.getId() + " state is set to Active"); _lbDao.persist(lb); } @@ -1906,7 +1904,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LoadBalancerVMMapVO lbVmMap : lbVmMaps) { instanceIds.add(lbVmMap.getInstanceId()); _lb2VmMapDao.remove(lb.getId(), lbVmMap.getInstanceId(), lbVmMap.getInstanceIp(), null); - s_logger.debug("Load balancer rule id " + lb.getId() + " is removed for vm " + + logger.debug("Load balancer rule id " + lb.getId() + " is removed for vm " + lbVmMap.getInstanceId() + " instance ip " + lbVmMap.getInstanceIp()); } @@ -1914,14 +1912,14 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (_lb2VmMapDao.listByLoadBalancerId(lb.getId()).isEmpty()) { lb.setState(FirewallRule.State.Add); _lbDao.persist(lb); - s_logger.debug("LB rule " + lb.getId() + " state is set to Add as there are no more active LB-VM mappings"); + logger.debug("LB rule " + lb.getId() + " state is set to Add as there are no more active LB-VM mappings"); } // remove LB-Stickiness policy mapping that were state to revoke List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lb.getId(), true); if (!stickinesspolicies.isEmpty()) { _lb2stickinesspoliciesDao.remove(lb.getId(), true); - s_logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); + logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); } // remove LB-HealthCheck policy mapping that were state to @@ -1929,13 +1927,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements List healthCheckpolicies = _lb2healthcheckDao.listByLoadBalancerId(lb.getId(), true); if (!healthCheckpolicies.isEmpty()) { _lb2healthcheckDao.remove(lb.getId(), true); - s_logger.debug("Load balancer rule id " + 
lb.getId() + " is removed health check monitors policies"); + logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); } LoadBalancerCertMapVO lbCertMap = _lbCertMapDao.findByLbRuleId(lb.getId()); if (lbCertMap != null && lbCertMap.isRevoke()) { _lbCertMapDao.remove(lbCertMap.getId()); - s_logger.debug("Load balancer rule id " + lb.getId() + " removed certificate mapping"); + logger.debug("Load balancer rule id " + lb.getId() + " removed certificate mapping"); } return checkForReleaseElasticIp; @@ -1949,11 +1947,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { success = handleSystemLBIpRelease(lb); } catch (Exception ex) { - s_logger.warn("Failed to release system ip as a part of lb rule " + lb + " deletion due to exception ", ex); + logger.warn("Failed to release system ip as a part of lb rule " + lb + " deletion due to exception ", ex); success = false; } finally { if (!success) { - s_logger.warn("Failed to release system ip as a part of lb rule " + lb + " deletion"); + logger.warn("Failed to release system ip as a part of lb rule " + lb + " deletion"); } } } @@ -1974,12 +1972,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements IpAddress ip = _ipAddressDao.findById(lb.getSourceIpAddressId()); boolean success = true; if (ip.getSystem()) { - s_logger.debug("Releasing system ip address " + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.debug("Releasing system ip address " + lb.getSourceIpAddressId() + " as a part of delete lb rule"); if (!_ipAddrMgr.disassociatePublicIpAddress(lb.getSourceIpAddressId(), CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { - s_logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); success = 
false; } else { - s_logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); } } return success; @@ -1989,11 +1987,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements public boolean removeAllLoadBalanacersForIp(long ipId, Account caller, long callerUserId) { List rules = _firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.LoadBalancing); if (rules != null) { - s_logger.debug("Found " + rules.size() + " lb rules to cleanup"); + logger.debug("Found " + rules.size() + " lb rules to cleanup"); for (FirewallRule rule : rules) { boolean result = deleteLoadBalancerRule(rule.getId(), true, caller, callerUserId, false); if (result == false) { - s_logger.warn("Unable to remove load balancer rule " + rule.getId()); + logger.warn("Unable to remove load balancer rule " + rule.getId()); return false; } } @@ -2005,11 +2003,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements public boolean removeAllLoadBalanacersForNetwork(long networkId, Account caller, long callerUserId) { List rules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.LoadBalancing); if (rules != null) { - s_logger.debug("Found " + rules.size() + " lb rules to cleanup"); + logger.debug("Found " + rules.size() + " lb rules to cleanup"); for (FirewallRule rule : rules) { boolean result = deleteLoadBalancerRule(rule.getId(), true, caller, callerUserId, false); if (result == false) { - s_logger.warn("Unable to remove load balancer rule " + rule.getId()); + logger.warn("Unable to remove load balancer rule " + rule.getId()); return false; } } @@ -2135,9 +2133,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lbDao.update(lb.getId(), lb); _lbDao.persist(lb); - s_logger.debug("LB Rollback rule id: " + lbRuleId + " while 
updating LB rule."); + logger.debug("LB Rollback rule id: " + lbRuleId + " while updating LB rule."); } - s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); success = false; } } @@ -2172,7 +2170,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements vmLoadBalancerMappings = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); if(vmLoadBalancerMappings == null) { String msg = "no VM Loadbalancer Mapping found"; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } Map vmServiceState = new HashMap(vmLoadBalancerMappings.size()); @@ -2414,7 +2412,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements public boolean applyLbRules(List rules, boolean continueOnError) throws ResourceUnavailableException { if (rules == null || rules.size() == 0) { - s_logger.debug("There are no Load Balancing Rules to forward to the network elements"); + logger.debug("There are no Load Balancing Rules to forward to the network elements"); return true; } @@ -2443,7 +2441,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (!continueOnError) { throw e; } - s_logger.warn("Problems with applying load balancing rules but pushing on", e); + logger.warn("Problems with applying load balancing rules but pushing on", e); success = false; } diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index c32aeba1a35..c7bc61c49c0 100644 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -62,7 +62,6 @@ import org.apache.cloudstack.network.topology.NetworkTopology; import org.apache.cloudstack.network.topology.NetworkTopologyContext; import 
org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.usage.UsageUtils; -import org.apache.log4j.Logger; import org.cloud.network.router.deployment.RouterDeploymentDefinitionBuilder; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; @@ -252,7 +251,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = { VirtualNetworkApplianceManager.class, VirtualNetworkApplianceService.class }) public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements VirtualNetworkApplianceManager, VirtualNetworkApplianceService, VirtualMachineGuru, Listener, Configurable, StateListener { - private static final Logger s_logger = Logger.getLogger(VirtualNetworkApplianceManagerImpl.class); @Inject EntityManager _entityMgr; @@ -428,7 +426,7 @@ Configurable, StateListener { _accountMgr.checkAccess(caller, null, true, router); if (router.getServiceOfferingId() == serviceOfferingId) { - s_logger.debug("Router: " + routerId + "already has service offering: " + serviceOfferingId); + logger.debug("Router: " + routerId + "already has service offering: " + serviceOfferingId); return _routerDao.findById(routerId); } @@ -445,7 +443,7 @@ Configurable, StateListener { // Check that the router is stopped if (!router.getState().equals(State.Stopped)) { - s_logger.warn("Unable to upgrade router " + router.toString() + " in state " + router.getState()); + logger.warn("Unable to upgrade router " + router.toString() + " in state " + router.getState()); throw new InvalidParameterValueException("Unable to upgrade router " + router.toString() + " in state " + router.getState() + "; make sure the router is stopped and not in an error state before upgrading."); } @@ -492,7 +490,7 @@ Configurable, StateListener { // Clear stop pending flag after stopped successfully if (router.isStopPending()) { - s_logger.info("Clear the stop pending flag of router " + router.getHostName() + " after 
stop router successfully"); + logger.info("Clear the stop pending flag of router " + router.getHostName() + " after stop router successfully"); router.setStopPending(false); _routerDao.persist(router); virtualRouter.setStopPending(false); @@ -520,9 +518,9 @@ Configurable, StateListener { userStats.setCurrentBytesSent(0); userStats.setNetBytesSent(userStats.getNetBytesSent() + currentBytesSent); _userStatsDao.update(userStats.getId(), userStats); - s_logger.debug("Successfully updated user statistics as a part of domR " + router + " reboot/stop"); + logger.debug("Successfully updated user statistics as a part of domR " + router + " reboot/stop"); } else { - s_logger.warn("User stats were not created for account " + router.getAccountId() + " and dc " + router.getDataCenterId()); + logger.warn("User stats were not created for account " + router.getAccountId() + " and dc " + router.getDataCenterId()); } } } @@ -545,12 +543,12 @@ Configurable, StateListener { // Can reboot domain router only in Running state if (router == null || router.getState() != State.Running) { - s_logger.warn("Unable to reboot, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to reboot, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to reboot domR, it is not in right state " + router.getState(), DataCenter.class, router.getDataCenterId()); } final UserVO user = _userDao.findById(CallContext.current().getCallingUserId()); - s_logger.debug("Stopping and starting router " + router + " as a part of router reboot"); + logger.debug("Stopping and starting router " + router + " as a part of router reboot"); if (stop(router, false, user, caller) != null) { return startRouter(routerId, reprogramNetwork); @@ -625,7 +623,7 @@ Configurable, StateListener { _dnsBasicZoneUpdates = String.valueOf(_configDao.getValue(Config.DnsBasicZoneUpdates.key())); - s_logger.info("Router configurations: " + "ramsize=" + 
_routerRamSize); + logger.info("Router configurations: " + "ramsize=" + _routerRamSize); _agentMgr.registerForHostEvents(new SshKeysDistriMonitor(_agentMgr, _hostDao, _configDao), true, false, false); @@ -635,7 +633,7 @@ Configurable, StateListener { // this can sometimes happen, if DB is manually or programmatically manipulated if (offerings == null || offerings.size() < 2) { final String msg = "Data integrity problem : System Offering For Software router VM has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } @@ -650,7 +648,7 @@ Configurable, StateListener { _agentMgr.registerForHostEvents(this, true, false, false); - s_logger.info("DomainRouterManager is configured."); + logger.info("DomainRouterManager is configured."); return true; } @@ -660,7 +658,7 @@ Configurable, StateListener { if (_routerStatsInterval > 0) { _executor.scheduleAtFixedRate(new NetworkUsageTask(), _routerStatsInterval, _routerStatsInterval, TimeUnit.SECONDS); } else { - s_logger.debug("router.stats.interval - " + _routerStatsInterval + " so not scheduling the router stats thread"); + logger.debug("router.stats.interval - " + _routerStatsInterval + " so not scheduling the router stats thread"); } //Schedule Network stats update task @@ -697,7 +695,7 @@ Configurable, StateListener { } if (_usageAggregationRange < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); + logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); _usageAggregationRange = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; } @@ -705,7 +703,7 @@ Configurable, StateListener { final long initialDelay = aggDate - System.currentTimeMillis(); if( initialDelay < 0){ - s_logger.warn("Initial delay for network usage stats update task is incorrect. 
Stats update task will run immediately"); + logger.warn("Initial delay for network usage stats update task is incorrect. Stats update task will run immediately"); } _networkStatsUpdateExecutor.scheduleAtFixedRate(new NetworkStatsUpdateTask(), initialDelay, _usageAggregationRange * 60 * 1000, @@ -717,14 +715,14 @@ Configurable, StateListener { _rvrStatusUpdateExecutor.execute(new RvRStatusUpdateTask()); } } else { - s_logger.debug("router.check.interval - " + _routerCheckInterval + " so not scheduling the redundant router checking thread"); + logger.debug("router.check.interval - " + _routerCheckInterval + " so not scheduling the redundant router checking thread"); } final int routerAlertsCheckInterval = RouterAlertsCheckInterval.value(); if (routerAlertsCheckInterval > 0) { _checkExecutor.scheduleAtFixedRate(new CheckRouterAlertsTask(), routerAlertsCheckInterval, routerAlertsCheckInterval, TimeUnit.SECONDS); } else { - s_logger.debug("router.alerts.check.interval - " + routerAlertsCheckInterval + " so not scheduling the router alerts checking thread"); + logger.debug("router.alerts.check.interval - " + routerAlertsCheckInterval + " so not scheduling the router alerts checking thread"); } return true; @@ -747,7 +745,7 @@ Configurable, StateListener { protected void runInContext() { try { final List routers = _routerDao.listByStateAndNetworkType(State.Running, GuestType.Isolated, mgmtSrvrId); - s_logger.debug("Found " + routers.size() + " running routers. "); + logger.debug("Found " + routers.size() + " running routers. "); for (final DomainRouterVO router : routers) { final String privateIP = router.getPrivateIpAddress(); @@ -762,7 +760,7 @@ Configurable, StateListener { //[TODO] Avoiding the NPE now, but I have to find out what is going on with the network. - Wilder Rodrigues if (network == null) { - s_logger.error("Could not find a network with ID => " + routerNic.getNetworkId() + ". 
It might be a problem!"); + logger.error("Could not find a network with ID => " + routerNic.getNetworkId() + ". It might be a problem!"); continue; } if (forVpc && network.getTrafficType() == TrafficType.Public || !forVpc && network.getTrafficType() == TrafficType.Guest @@ -775,19 +773,19 @@ Configurable, StateListener { try { answer = (NetworkUsageAnswer) _agentMgr.easySend(router.getHostId(), usageCmd); } catch (final Exception e) { - s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId(), e); + logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId(), e); continue; } if (answer != null) { if (!answer.getResult()) { - s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + "; details: " + answer.getDetails()); continue; } try { if (answer.getBytesReceived() == 0 && answer.getBytesSent() == 0) { - s_logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics"); + logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics"); continue; } final NetworkUsageAnswer answerFinal = answer; @@ -797,21 +795,21 @@ Configurable, StateListener { final UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterId(), network.getId(), forVpc ? 
routerNic.getIPv4Address() : null, router.getId(), routerType); if (stats == null) { - s_logger.warn("unable to find stats for account: " + router.getAccountId()); + logger.warn("unable to find stats for account: " + router.getAccountId()); return; } if (previousStats != null && (previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived() || previousStats.getCurrentBytesSent() != stats .getCurrentBytesSent())) { - s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + "Ignoring current answer. Router: " + logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + "Ignoring current answer. Router: " + answerFinal.getRouterName() + " Rcvd: " + answerFinal.getBytesReceived() + "Sent: " + answerFinal.getBytesSent()); return; } if (stats.getCurrentBytesReceived() > answerFinal.getBytesReceived()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + if (logger.isDebugEnabled()) { + logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: " + answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesReceived() + " Stored: " + stats.getCurrentBytesReceived()); } @@ -819,8 +817,8 @@ Configurable, StateListener { } stats.setCurrentBytesReceived(answerFinal.getBytesReceived()); if (stats.getCurrentBytesSent() > answerFinal.getBytesSent()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + if (logger.isDebugEnabled()) { + logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. 
Router: " + answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesSent() + " Stored: " + stats.getCurrentBytesSent()); } @@ -837,7 +835,7 @@ Configurable, StateListener { }); } catch (final Exception e) { - s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + answer.getBytesReceived() + "; Tx: " + logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + answer.getBytesReceived() + "; Tx: " + answer.getBytesSent()); } } @@ -846,7 +844,7 @@ Configurable, StateListener { } } } catch (final Exception e) { - s_logger.warn("Error while collecting network stats", e); + logger.warn("Error while collecting network stats", e); } } } @@ -865,7 +863,7 @@ Configurable, StateListener { // msHost in UP state with min id should run the job final ManagementServerHostVO msHost = _msHostDao.findOneInUpState(new Filter(ManagementServerHostVO.class, "id", false, 0L, 1L)); if (msHost == null || msHost.getMsid() != mgmtSrvrId) { - s_logger.debug("Skipping aggregate network stats update"); + logger.debug("Skipping aggregate network stats update"); scanLock.unlock(); return; } @@ -886,17 +884,17 @@ Configurable, StateListener { .getCurrentBytesReceived(), stat.getCurrentBytesSent(), stat.getAggBytesReceived(), stat.getAggBytesSent(), updatedTime); _userStatsLogDao.persist(statsLog); } - s_logger.debug("Successfully updated aggregate network stats"); + logger.debug("Successfully updated aggregate network stats"); } }); } catch (final Exception e) { - s_logger.debug("Failed to update aggregate network stats", e); + logger.debug("Failed to update aggregate network stats", e); } finally { scanLock.unlock(); } } } catch (final Exception e) { - s_logger.debug("Exception while trying to acquire network stats lock", e); + logger.debug("Exception while trying to acquire network stats lock", e); } finally { scanLock.releaseRef(); } @@ -944,11 +942,11 @@ Configurable, StateListener { if (origAnswer 
instanceof CheckS2SVpnConnectionsAnswer) { answer = (CheckS2SVpnConnectionsAnswer) origAnswer; } else { - s_logger.warn("Unable to update router " + router.getHostName() + "'s VPN connection status"); + logger.warn("Unable to update router " + router.getHostName() + "'s VPN connection status"); continue; } if (!answer.getResult()) { - s_logger.warn("Unable to update router " + router.getHostName() + "'s VPN connection status"); + logger.warn("Unable to update router " + router.getHostName() + "'s VPN connection status"); continue; } for (final Site2SiteVpnConnectionVO conn : conns) { @@ -972,7 +970,7 @@ Configurable, StateListener { final String title = "Site-to-site Vpn Connection to " + gw.getName() + " just switch from " + oldState + " to " + conn.getState(); final String context = "Site-to-site Vpn Connection to " + gw.getName() + " on router " + router.getHostName() + "(id: " + router.getId() + ") " + " just switch from " + oldState + " to " + conn.getState(); - s_logger.info(context); + logger.info(context); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); } } finally { @@ -1010,13 +1008,13 @@ Configurable, StateListener { if (origAnswer instanceof CheckRouterAnswer) { answer = (CheckRouterAnswer) origAnswer; } else { - s_logger.warn("Unable to update router " + router.getHostName() + "'s status"); + logger.warn("Unable to update router " + router.getHostName() + "'s status"); } RedundantState state = RedundantState.UNKNOWN; if (answer != null && answer.getResult()) { state = answer.getState(); } else { - s_logger.info("Agent response doesn't seem to be correct ==> " + answer.getResult()); + logger.info("Agent response doesn't seem to be correct ==> " + answer.getResult()); } router.setRedundantState(state); updated = true; @@ -1030,7 +1028,7 @@ Configurable, StateListener { final String title = "Redundant virtual router " + router.getInstanceName() + " just switch 
from " + prevState + " to " + currState; final String context = "Redundant virtual router (name: " + router.getHostName() + ", id: " + router.getId() + ") " + " just switch from " + prevState + " to " + currState; - s_logger.info(context); + logger.info(context); if (currState == RedundantState.MASTER) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); } @@ -1046,18 +1044,18 @@ Configurable, StateListener { final HostVO backupHost = _hostDao.findById(backupRouter.getHostId()); if (masterHost.getState() == Status.Up && backupHost.getState() == Status.Up) { final String title = "Reboot " + backupRouter.getInstanceName() + " to ensure redundant virtual routers work"; - if (s_logger.isDebugEnabled()) { - s_logger.debug(title); + if (logger.isDebugEnabled()) { + logger.debug(title); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, backupRouter.getDataCenterId(), backupRouter.getPodIdToDeployIn(), title, title); try { rebootRouter(backupRouter.getId(), true); } catch (final ConcurrentOperationException e) { - s_logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); + logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); } catch (final ResourceUnavailableException e) { - s_logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); + logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); } catch (final InsufficientCapacityException e) { - s_logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); + logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); } } } @@ -1138,7 +1136,7 @@ Configurable, StateListener { + ", id: " + router.getId() + ") are both in MASTER state! If the problem persist, restart both of routers. 
"; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, dupRouter.getDataCenterId(), dupRouter.getPodIdToDeployIn(), title, context); - s_logger.warn(context); + logger.warn(context); } else { networkRouterMaps.put(routerGuestNtwkId, router); } @@ -1190,19 +1188,19 @@ Configurable, StateListener { } // && router.getState() == State.Stopped if (router.getHostId() == null && router.getState() == State.Running) { - s_logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to can't find host"); + logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to can't find host"); continue; } final HostVO host = _hostDao.findById(router.getHostId()); if (host == null || host.getManagementServerId() == null || host.getManagementServerId() != ManagementServerNode.getManagementServerId()) { - s_logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to not belong to this mgmt server"); + logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to not belong to this mgmt server"); continue; } updateRoutersRedundantState(routers); checkDuplicateMaster(routers); checkSanity(routers); } catch (final Exception ex) { - s_logger.error("Fail to complete the RvRStatusUpdateTask! ", ex); + logger.error("Fail to complete the RvRStatusUpdateTask! ", ex); } } } @@ -1217,26 +1215,26 @@ Configurable, StateListener { protected void runInContext() { try { final List routers = _routerDao.listIsolatedByHostId(null); - s_logger.debug("Found " + routers.size() + " routers to update status. "); + logger.debug("Found " + routers.size() + " routers to update status. 
"); updateSite2SiteVpnConnectionState(routers); List networks = _networkDao.listVpcNetworks(); - s_logger.debug("Found " + networks.size() + " VPC networks to update Redundant State. "); + logger.debug("Found " + networks.size() + " VPC networks to update Redundant State. "); pushToUpdateQueue(networks); networks = _networkDao.listRedundantNetworks(); - s_logger.debug("Found " + networks.size() + " networks to update RvR status. "); + logger.debug("Found " + networks.size() + " networks to update RvR status. "); pushToUpdateQueue(networks); } catch (final Exception ex) { - s_logger.error("Fail to complete the CheckRouterTask! ", ex); + logger.error("Fail to complete the CheckRouterTask! ", ex); } } protected void pushToUpdateQueue(final List networks) throws InterruptedException { for (final NetworkVO network : networks) { if (!_vrUpdateQueue.offer(network.getId(), 500, TimeUnit.MILLISECONDS)) { - s_logger.warn("Cannot insert into virtual router update queue! Adjustment of router.check.interval and router.check.poolsize maybe needed."); + logger.warn("Cannot insert into virtual router update queue! Adjustment of router.check.interval and router.check.poolsize maybe needed."); break; } } @@ -1252,7 +1250,7 @@ Configurable, StateListener { try { getRouterAlerts(); } catch (final Exception ex) { - s_logger.error("Fail to complete the CheckRouterAlertsTask! ", ex); + logger.error("Fail to complete the CheckRouterAlertsTask! ", ex); } } } @@ -1261,7 +1259,7 @@ Configurable, StateListener { try { final List routers = _routerDao.listByStateAndManagementServer(State.Running, mgmtSrvrId); - s_logger.debug("Found " + routers.size() + " running routers. "); + logger.debug("Found " + routers.size() + " running routers. 
"); for (final DomainRouterVO router : routers) { final String serviceMonitoringFlag = SetServiceMonitor.valueIn(router.getDataCenterId()); @@ -1294,17 +1292,17 @@ Configurable, StateListener { GetRouterAlertsAnswer answer = null; if (origAnswer == null) { - s_logger.warn("Unable to get alerts from router " + router.getHostName()); + logger.warn("Unable to get alerts from router " + router.getHostName()); continue; } if (origAnswer instanceof GetRouterAlertsAnswer) { answer = (GetRouterAlertsAnswer) origAnswer; } else { - s_logger.warn("Unable to get alerts from router " + router.getHostName()); + logger.warn("Unable to get alerts from router " + router.getHostName()); continue; } if (!answer.getResult()) { - s_logger.warn("Unable to get alerts from router " + router.getHostName() + " " + answer.getDetails()); + logger.warn("Unable to get alerts from router " + router.getHostName() + " " + answer.getDetails()); continue; } @@ -1316,7 +1314,7 @@ Configurable, StateListener { try { sdfrmt.parse(lastAlertTimeStamp); } catch (final ParseException e) { - s_logger.warn("Invalid last alert timestamp received while collecting alerts from router: " + router.getInstanceName()); + logger.warn("Invalid last alert timestamp received while collecting alerts from router: " + router.getInstanceName()); continue; } for (final String alert : alerts) { @@ -1332,13 +1330,13 @@ Configurable, StateListener { } } } catch (final Exception e) { - s_logger.warn("Error while collecting alerts from router: " + router.getInstanceName(), e); + logger.warn("Error while collecting alerts from router: " + router.getInstanceName(), e); continue; } } } } catch (final Exception e) { - s_logger.warn("Error while collecting alerts from router", e); + logger.warn("Error while collecting alerts from router", e); } } @@ -1406,12 +1404,12 @@ Configurable, StateListener { // DOMR control command is sent over management server in VMware if (dest.getHost().getHypervisorType() == HypervisorType.VMware || 
dest.getHost().getHypervisorType() == HypervisorType.Hyperv) { - s_logger.info("Check if we need to add management server explicit route to DomR. pod cidr: " + dest.getPod().getCidrAddress() + "/" + logger.info("Check if we need to add management server explicit route to DomR. pod cidr: " + dest.getPod().getCidrAddress() + "/" + dest.getPod().getCidrSize() + ", pod gateway: " + dest.getPod().getGateway() + ", management host: " + ApiServiceConfiguration.ManagementHostIPAdr.value()); - if (s_logger.isInfoEnabled()) { - s_logger.info("Add management server explicit route to DomR."); + if (logger.isInfoEnabled()) { + logger.info("Add management server explicit route to DomR."); } // always add management explicit route, for basic @@ -1514,7 +1512,7 @@ Configurable, StateListener { acntq.and(acntq.entity().getUsername(), SearchCriteria.Op.EQ, "baremetal-system-account"); final UserVO user = acntq.find(); if (user == null) { - s_logger.warn(String + logger.warn(String .format("global setting[baremetal.provision.done.notification] is enabled but user baremetal-system-account is not found. Baremetal provision done notification will not be enabled")); } else { buf.append(String.format(" baremetalnotificationsecuritykey=%s", user.getSecretKey())); @@ -1524,8 +1522,8 @@ Configurable, StateListener { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Boot Args for " + profile + ": " + buf.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Boot Args for " + profile + ": " + buf.toString()); } return true; @@ -1616,7 +1614,7 @@ Configurable, StateListener { buf.append(" router_password=").append(password); } catch (final NoSuchAlgorithmException e) { - s_logger.error("Failed to pssword! Will use the plan B instead."); + logger.error("Failed to pssword! 
Will use the plan B instead."); buf.append(" router_password=").append(vpc.getUuid()); } @@ -1671,7 +1669,7 @@ Configurable, StateListener { final NicProfile controlNic = getControlNic(profile); if (controlNic == null) { - s_logger.error("Control network doesn't exist for the router " + router); + logger.error("Control network doesn't exist for the router " + router); return false; } @@ -1732,7 +1730,7 @@ Configurable, StateListener { final NetworkVO network = _networkDao.findById(networkId); - s_logger.debug("Creating monitoring services on " + router + " start..."); + logger.debug("Creating monitoring services on " + router + " start..."); // get the list of sevices for this network to monitor final List services = new ArrayList(); @@ -1822,19 +1820,19 @@ Configurable, StateListener { protected void finalizeUserDataAndDhcpOnStart(final Commands cmds, final DomainRouterVO router, final Provider provider, final Long guestNetworkId) { if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.Dhcp, provider)) { // Resend dhcp - s_logger.debug("Reapplying dhcp entries as a part of domR " + router + " start..."); + logger.debug("Reapplying dhcp entries as a part of domR " + router + " start..."); _commandSetupHelper.createDhcpEntryCommandsForVMs(router, cmds, guestNetworkId); } if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.UserData, provider)) { // Resend user data - s_logger.debug("Reapplying vm data (userData and metaData) entries as a part of domR " + router + " start..."); + logger.debug("Reapplying vm data (userData and metaData) entries as a part of domR " + router + " start..."); _commandSetupHelper.createVmDataCommandForVMs(router, cmds, guestNetworkId); } } protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainRouterVO router, final Provider provider, final Long guestNetworkId) { - s_logger.debug("Resending ipAssoc, port forwarding, load balancing rules as a part of Virtual router 
start"); + logger.debug("Resending ipAssoc, port forwarding, load balancing rules as a part of Virtual router start"); final ArrayList publicIps = getPublicIpsToApply(router, provider, guestNetworkId); final List firewallRulesEgress = new ArrayList(); @@ -1849,7 +1847,7 @@ Configurable, StateListener { } // Re-apply firewall Egress rules - s_logger.debug("Found " + firewallRulesEgress.size() + " firewall Egress rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + firewallRulesEgress.size() + " firewall Egress rule(s) to apply as a part of domR " + router + " start."); if (!firewallRulesEgress.isEmpty()) { _commandSetupHelper.createFirewallRulesCommands(firewallRulesEgress, router, cmds, guestNetworkId); } @@ -1890,25 +1888,25 @@ Configurable, StateListener { } // Re-apply static nats - s_logger.debug("Found " + staticNats.size() + " static nat(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + staticNats.size() + " static nat(s) to apply as a part of domR " + router + " start."); if (!staticNats.isEmpty()) { _commandSetupHelper.createApplyStaticNatCommands(staticNats, router, cmds, guestNetworkId); } // Re-apply firewall Ingress rules - s_logger.debug("Found " + firewallRulesIngress.size() + " firewall Ingress rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + firewallRulesIngress.size() + " firewall Ingress rule(s) to apply as a part of domR " + router + " start."); if (!firewallRulesIngress.isEmpty()) { _commandSetupHelper.createFirewallRulesCommands(firewallRulesIngress, router, cmds, guestNetworkId); } // Re-apply port forwarding rules - s_logger.debug("Found " + pfRules.size() + " port forwarding rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + pfRules.size() + " port forwarding rule(s) to apply as a part of domR " + router + " start."); if (!pfRules.isEmpty()) { 
_commandSetupHelper.createApplyPortForwardingRulesCommands(pfRules, router, cmds, guestNetworkId); } // Re-apply static nat rules - s_logger.debug("Found " + staticNatFirewallRules.size() + " static nat rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + staticNatFirewallRules.size() + " static nat rule(s) to apply as a part of domR " + router + " start."); if (!staticNatFirewallRules.isEmpty()) { final List staticNatRules = new ArrayList(); for (final FirewallRule rule : staticNatFirewallRules) { @@ -1918,7 +1916,7 @@ Configurable, StateListener { } // Re-apply vpn rules - s_logger.debug("Found " + vpns.size() + " vpn(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + vpns.size() + " vpn(s) to apply as a part of domR " + router + " start."); if (!vpns.isEmpty()) { for (final RemoteAccessVpn vpn : vpns) { _commandSetupHelper.createApplyVpnCommands(true, vpn, router, cmds); @@ -1940,7 +1938,7 @@ Configurable, StateListener { } } - s_logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of domR " + router + " start."); if (!lbRules.isEmpty()) { _commandSetupHelper.createApplyLoadBalancingRulesCommands(lbRules, router, cmds, guestNetworkId); } @@ -1953,11 +1951,11 @@ Configurable, StateListener { final String supportsMultipleSubnets = dhcpCapabilities.get(Network.Capability.DhcpAccrossMultipleSubnets); if (supportsMultipleSubnets != null && Boolean.valueOf(supportsMultipleSubnets)) { final List revokedIpAliasVOs = _nicIpAliasDao.listByNetworkIdAndState(guestNetworkId, NicIpAlias.state.revoked); - s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); + logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); 
removeRevokedIpAliasFromDb(revokedIpAliasVOs); final List aliasVOs = _nicIpAliasDao.listByNetworkIdAndState(guestNetworkId, NicIpAlias.state.active); - s_logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); + logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); final List activeIpAliasTOs = new ArrayList(); for (final NicIpAliasVO aliasVO : aliasVOs) { activeIpAliasTOs.add(new IpAliasTO(aliasVO.getIp4Address(), aliasVO.getNetmask(), aliasVO.getAliasCount().toString())); @@ -1991,7 +1989,7 @@ Configurable, StateListener { rules.add(rule); } else { - s_logger.debug(" Egress policy for the Network "+ networkId +" is "+defaultEgressPolicy + " So no need"+ + logger.debug(" Egress policy for the Network "+ networkId +" is "+defaultEgressPolicy + " So no need"+ " of default rule is needed. "); } } @@ -2009,7 +2007,7 @@ Configurable, StateListener { final ArrayList publicIps = getPublicIpsToApply(router, provider, guestNetworkId); if (publicIps != null && !publicIps.isEmpty()) { - s_logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + router + " start."); // Re-apply public ip addresses - should come before PF/LB/VPN if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.Firewall, provider)) { _commandSetupHelper.createAssociateIPCommands(router, publicIps, cmds, 0); @@ -2037,7 +2035,7 @@ Configurable, StateListener { if (skipInStates != null) { for (final IpAddress.State stateToSkip : skipInStates) { if (userIp.getState() == stateToSkip) { - s_logger.debug("Skipping ip address " + userIp + " in state " + userIp.getState()); + logger.debug("Skipping ip address " + userIp + " in state " + userIp.getState()); addIp = false; break; } @@ -2076,8 +2074,8 @@ Configurable, StateListener { final String errorDetails 
= "Details: " + answer.getDetails() + " " + answer.toString(); // add alerts for the failed commands _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), errorMessage, errorDetails); - s_logger.error(answer.getDetails()); - s_logger.warn(errorMessage); + logger.error(answer.getDetails()); + logger.warn(errorMessage); // Stop the router if any of the commands failed return false; } @@ -2103,7 +2101,7 @@ Configurable, StateListener { try { result = networkTopology.setupDhcpForPvlan(true, router, router.getHostId(), nicProfile); } catch (final ResourceUnavailableException e) { - s_logger.debug("ERROR in finalizeStart: ", e); + logger.debug("ERROR in finalizeStart: ", e); } } } @@ -2136,7 +2134,7 @@ Configurable, StateListener { try { networkTopology.setupDhcpForPvlan(false, domR, domR.getHostId(), nicProfile); } catch (final ResourceUnavailableException e) { - s_logger.debug("ERROR in finalizeStop: ", e); + logger.debug("ERROR in finalizeStop: ", e); } } } @@ -2151,13 +2149,13 @@ Configurable, StateListener { @Override public boolean startRemoteAccessVpn(final Network network, final RemoteAccessVpn vpn, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { - s_logger.warn("Failed to start remote access VPN: no router found for account and zone"); + logger.warn("Failed to start remote access VPN: no router found for account and zone"); throw new ResourceUnavailableException("Failed to start remote access VPN: no router found for account and zone", DataCenter.class, network.getDataCenterId()); } for (final VirtualRouter router : routers) { if (router.getState() != State.Running) { - s_logger.warn("Failed to start remote access VPN: router not in right state " + router.getState()); + logger.warn("Failed to start remote access VPN: router not in right state " + router.getState()); throw new ResourceUnavailableException("Failed to start remote access VPN: 
router not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } @@ -2171,14 +2169,14 @@ Configurable, StateListener { Answer answer = cmds.getAnswer("users"); if (!answer.getResult()) { - s_logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails()); throw new ResourceUnavailableException("Unable to start vpn: Unable to add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); } answer = cmds.getAnswer("startVpn"); if (!answer.getResult()) { - s_logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails()); throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); @@ -2191,7 +2189,7 @@ Configurable, StateListener { @Override public boolean deleteRemoteAccessVpn(final Network network, final RemoteAccessVpn vpn, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { - s_logger.warn("Failed to delete remote access VPN: no router found for account and zone"); + logger.warn("Failed to delete remote access VPN: no router found for account and zone"); throw new 
ResourceUnavailableException("Failed to delete remote access VPN", DataCenter.class, network.getDataCenterId()); } @@ -2202,10 +2200,10 @@ Configurable, StateListener { _commandSetupHelper.createApplyVpnCommands(false, vpn, router, cmds); result = result && _nwHelper.sendCommandsToRouter(router, cmds); } else if (router.getState() == State.Stopped) { - s_logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); + logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); continue; } else { - s_logger.warn("Failed to delete remote access VPN: domR " + router + " is not in right state " + router.getState()); + logger.warn("Failed to delete remote access VPN: domR " + router + " is not in right state " + router.getState()); throw new ResourceUnavailableException("Failed to delete remote access VPN: domR is not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } @@ -2217,7 +2215,7 @@ Configurable, StateListener { @Override public DomainRouterVO stop(final VirtualRouter router, final boolean forced, final User user, final Account caller) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Stopping router " + router); + logger.debug("Stopping router " + router); try { _itMgr.advanceStop(router.getUuid(), forced); return _routerDao.findById(router.getId()); @@ -2229,26 +2227,26 @@ Configurable, StateListener { @Override public boolean removeDhcpSupportForSubnet(final Network network, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { - s_logger.warn("Failed to add/remove VPN users: no router found for account and zone"); + logger.warn("Failed to add/remove VPN users: no router found for account and zone"); throw new ResourceUnavailableException("Unable to assign ip addresses, domR doesn't exist for network " + network.getId(), DataCenter.class, 
network.getDataCenterId()); } for (final DomainRouterVO router : routers) { if (router.getState() != State.Running) { - s_logger.warn("Failed to add/remove VPN users: router not in running state"); + logger.warn("Failed to add/remove VPN users: router not in running state"); throw new ResourceUnavailableException("Unable to assign ip addresses, domR is not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } final Commands cmds = new Commands(Command.OnError.Continue); final List revokedIpAliasVOs = _nicIpAliasDao.listByNetworkIdAndState(network.getId(), NicIpAlias.state.revoked); - s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); + logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); final List revokedIpAliasTOs = new ArrayList(); for (final NicIpAliasVO revokedAliasVO : revokedIpAliasVOs) { revokedIpAliasTOs.add(new IpAliasTO(revokedAliasVO.getIp4Address(), revokedAliasVO.getNetmask(), revokedAliasVO.getAliasCount().toString())); } final List aliasVOs = _nicIpAliasDao.listByNetworkIdAndState(network.getId(), NicIpAlias.state.active); - s_logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); + logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); final List activeIpAliasTOs = new ArrayList(); for (final NicIpAliasVO aliasVO : aliasVOs) { activeIpAliasTOs.add(new IpAliasTO(aliasVO.getIp4Address(), aliasVO.getNetmask(), aliasVO.getAliasCount().toString())); @@ -2307,7 +2305,7 @@ Configurable, StateListener { for (final NicVO nic : nics) { if (!_networkMgr.startNetwork(nic.getNetworkId(), dest, context)) { - s_logger.warn("Failed to start network id=" + nic.getNetworkId() + " as a part of domR start"); + logger.warn("Failed to start network id=" + nic.getNetworkId() + " as a part of 
domR start"); throw new CloudRuntimeException("Failed to start network id=" + nic.getNetworkId() + " as a part of domR start"); } } @@ -2372,16 +2370,16 @@ Configurable, StateListener { final List routers = _routerDao.listIsolatedByHostId(host.getId()); for (DomainRouterVO router : routers) { if (router.isStopPending()) { - s_logger.info("Stopping router " + router.getInstanceName() + " due to stop pending flag found!"); + logger.info("Stopping router " + router.getInstanceName() + " due to stop pending flag found!"); final State state = router.getState(); if (state != State.Stopped && state != State.Destroyed) { try { stopRouter(router.getId(), false); } catch (final ResourceUnavailableException e) { - s_logger.warn("Fail to stop router " + router.getInstanceName(), e); + logger.warn("Fail to stop router " + router.getInstanceName(), e); throw new ConnectionException(false, "Fail to stop router " + router.getInstanceName()); } catch (final ConcurrentOperationException e) { - s_logger.warn("Fail to stop router " + router.getInstanceName(), e); + logger.warn("Fail to stop router " + router.getInstanceName(), e); throw new ConnectionException(false, "Fail to stop router " + router.getInstanceName()); } } @@ -2435,19 +2433,19 @@ Configurable, StateListener { try { answer = (NetworkUsageAnswer) _agentMgr.easySend(router.getHostId(), usageCmd); } catch (final Exception e) { - s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId(), e); + logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId(), e); continue; } if (answer != null) { if (!answer.getResult()) { - s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + "; details: " + logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + 
router.getHostId() + "; details: " + answer.getDetails()); continue; } try { if (answer.getBytesReceived() == 0 && answer.getBytesSent() == 0) { - s_logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics"); + logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics"); continue; } @@ -2458,29 +2456,29 @@ Configurable, StateListener { final UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterId(), network.getId(), forVpc ? routerNic.getIPv4Address() : null, router.getId(), routerType); if (stats == null) { - s_logger.warn("unable to find stats for account: " + router.getAccountId()); + logger.warn("unable to find stats for account: " + router.getAccountId()); return; } if (previousStats != null && (previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived() || previousStats.getCurrentBytesSent() != stats .getCurrentBytesSent())) { - s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + "Ignoring current answer. Router: " + logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + "Ignoring current answer. Router: " + answerFinal.getRouterName() + " Rcvd: " + answerFinal.getBytesReceived() + "Sent: " + answerFinal.getBytesSent()); return; } if (stats.getCurrentBytesReceived() > answerFinal.getBytesReceived()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: " + if (logger.isDebugEnabled()) { + logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. 
Router: " + answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesReceived() + " Stored: " + stats.getCurrentBytesReceived()); } stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); } stats.setCurrentBytesReceived(answerFinal.getBytesReceived()); if (stats.getCurrentBytesSent() > answerFinal.getBytesSent()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: " + if (logger.isDebugEnabled()) { + logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: " + answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesSent() + " Stored: " + stats.getCurrentBytesSent()); } stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); @@ -2495,7 +2493,7 @@ Configurable, StateListener { } }); } catch (final Exception e) { - s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + answer.getBytesReceived() + "; Tx: " + logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + answer.getBytesReceived() + "; Tx: " + answer.getBytesSent()); } } @@ -2574,7 +2572,7 @@ Configurable, StateListener { final List jobIds = new ArrayList(); for (final DomainRouterVO router : routers) { if (!_nwHelper.checkRouterVersion(router)) { - s_logger.debug("Upgrading template for router: " + router.getId()); + logger.debug("Upgrading template for router: " + router.getId()); final Map params = new HashMap(); params.put("ctxUserId", "1"); params.put("ctxAccountId", "" + router.getAccountId()); @@ -2589,7 +2587,7 @@ Configurable, StateListener { final long jobId = _asyncMgr.submitAsyncJob(job); jobIds.add(jobId); } else { - s_logger.debug("Router: " + router.getId() + " is already at the latest version. 
No upgrade required"); + logger.debug("Router: " + router.getId() + " is already at the latest version. No upgrade required"); } } return jobIds; @@ -2619,7 +2617,7 @@ Configurable, StateListener { event == VirtualMachine.Event.FollowAgentPowerOnReport && newState == State.Running && isOutOfBandMigrated(opaque)) { - s_logger.debug("Virtual router " + vo.getInstanceName() + " is powered-on out-of-band"); + logger.debug("Virtual router " + vo.getInstanceName() + " is powered-on out-of-band"); } return true; @@ -2660,10 +2658,10 @@ Configurable, StateListener { @Override protected void runInContext() { try { - s_logger.info("Reboot router " + _routerId + " to refresh network rules"); + logger.info("Reboot router " + _routerId + " to refresh network rules"); rebootRouter(_routerId, true); } catch (final Exception e) { - s_logger.warn("Error while rebooting the router", e); + logger.warn("Error while rebooting the router", e); } } } @@ -2680,7 +2678,7 @@ Configurable, StateListener { if (routerIpInNetwork == null) { // Nic hasn't been created in this router yet. Try to configure the next one. - s_logger.warn("The Network is not configured in the router " + router.getHostName() + " yet. Try the next router!"); + logger.warn("The Network is not configured in the router " + router.getHostName() + " yet. Try the next router!"); errors++; continue; } @@ -2692,7 +2690,7 @@ Configurable, StateListener { } } if (errors == routers.size()) { - s_logger.error("aggregationExecution() on " + getClass().getName() + " failed! Network is not configured in any router."); + logger.error("aggregationExecution() on " + getClass().getName() + " failed! 
Network is not configured in any router."); return false; } return true; diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index a13ff218d46..dfa9b7dc3af 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -27,7 +27,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -96,7 +95,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Component @Local(value = { VpcVirtualNetworkApplianceManager.class, VpcVirtualNetworkApplianceService.class }) public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplianceManagerImpl implements VpcVirtualNetworkApplianceManager { - private static final Logger s_logger = Logger.getLogger(VpcVirtualNetworkApplianceManagerImpl.class); @Inject private NetworkACLManager _networkACLMgr; @@ -127,7 +125,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian public boolean addVpcRouterToGuestNetwork(final VirtualRouter router, final Network network, final Map params) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { if (network.getTrafficType() != TrafficType.Guest) { - s_logger.warn("Network " + network + " is not of type " + TrafficType.Guest); + logger.warn("Network " + network + " is not of type " + TrafficType.Guest); return false; } @@ -143,7 +141,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian if (guestNic != null) { result = setupVpcGuestNetwork(network, router, true, guestNic); } else { - s_logger.warn("Failed to add router " + router + " to guest network " + network); + logger.warn("Failed to add 
router " + router + " to guest network " + network); result = false; } // 3) apply networking rules @@ -151,18 +149,18 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian sendNetworkRulesToRouter(router.getId(), network.getId()); } } catch (final Exception ex) { - s_logger.warn("Failed to add router " + router + " to network " + network + " due to ", ex); + logger.warn("Failed to add router " + router + " to network " + network + " due to ", ex); result = false; } finally { if (!result) { - s_logger.debug("Removing the router " + router + " from network " + network + " as a part of cleanup"); + logger.debug("Removing the router " + router + " from network " + network + " as a part of cleanup"); if (removeVpcRouterFromGuestNetwork(router, network)) { - s_logger.debug("Removed the router " + router + " from network " + network + " as a part of cleanup"); + logger.debug("Removed the router " + router + " from network " + network + " as a part of cleanup"); } else { - s_logger.warn("Failed to remove the router " + router + " from network " + network + " as a part of cleanup"); + logger.warn("Failed to remove the router " + router + " from network " + network + " as a part of cleanup"); } } else { - s_logger.debug("Succesfully added router " + router + " to guest network " + network); + logger.debug("Succesfully added router " + router + " to guest network " + network); } } @@ -173,7 +171,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian public boolean removeVpcRouterFromGuestNetwork(final VirtualRouter router, final Network network) throws ConcurrentOperationException, ResourceUnavailableException { if (network.getTrafficType() != TrafficType.Guest) { - s_logger.warn("Network " + network + " is not of type " + TrafficType.Guest); + logger.warn("Network " + network + " is not of type " + TrafficType.Guest); return false; } @@ -181,13 +179,13 @@ public class VpcVirtualNetworkApplianceManagerImpl extends 
VirtualNetworkApplian try { // Check if router is a part of the Guest network if (!_networkModel.isVmPartOfNetwork(router.getId(), network.getId())) { - s_logger.debug("Router " + router + " is not a part of the Guest network " + network); + logger.debug("Router " + router + " is not a part of the Guest network " + network); return result; } result = setupVpcGuestNetwork(network, router, false, _networkModel.getNicProfile(router, network.getId(), null)); if (!result) { - s_logger.warn("Failed to destroy guest network config " + network + " on router " + router); + logger.warn("Failed to destroy guest network config " + network + " on router " + router); return false; } @@ -215,15 +213,15 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian final Answer setupAnswer = cmds.getAnswer("setupguestnetwork"); final String setup = add ? "set" : "destroy"; if (!(setupAnswer != null && setupAnswer.getResult())) { - s_logger.warn("Unable to " + setup + " guest network on router " + router); + logger.warn("Unable to " + setup + " guest network on router " + router); result = false; } return result; } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending setup guest network command to the backend"); + logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending setup guest network command to the backend"); return true; } else { - s_logger.warn("Unable to setup guest network on virtual router " + router + " is not in the right state " + router.getState()); + logger.warn("Unable to setup guest network on virtual router " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to setup guest network on the backend," + " virtual router " + router + " is not in the right state", DataCenter.class, router.getDataCenterId()); 
} @@ -249,7 +247,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian defaultDns1 = nic.getIPv4Dns1(); defaultDns2 = nic.getIPv4Dns2(); } - s_logger.debug("Removing nic " + nic + " of type " + nic.getTrafficType() + " from the nics passed on vm start. " + "The nic will be plugged later"); + logger.debug("Removing nic " + nic + " of type " + nic.getTrafficType() + " from the nics passed on vm start. " + "The nic will be plugged later"); it.remove(); } } @@ -282,7 +280,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian // 1) FORM SSH CHECK COMMAND final NicProfile controlNic = getControlNic(profile); if (controlNic == null) { - s_logger.error("Control network doesn't exist for the router " + domainRouterVO); + logger.error("Control network doesn't exist for the router " + domainRouterVO); return false; } @@ -380,7 +378,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian if (privateGwAclId != null) { // set network acl on private gateway final List networkACLs = _networkACLItemDao.listByACL(privateGwAclId); - s_logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for private gateway ip = " + logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for private gateway ip = " + ipVO.getIpAddress()); _commandSetupHelper.createNetworkACLsCommands(networkACLs, domainRouterVO, cmds, ipVO.getNetworkId(), true); @@ -388,7 +386,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian } } } catch (final Exception ex) { - s_logger.warn("Failed to add router " + domainRouterVO + " to network due to exception ", ex); + logger.warn("Failed to add router " + domainRouterVO + " to network due to exception ", ex); return false; } @@ -405,7 +403,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian 
staticRouteProfiles.add(new StaticRouteProfile(route, gateway)); } - s_logger.debug("Found " + staticRouteProfiles.size() + " static routes to apply as a part of vpc route " + domainRouterVO + " start"); + logger.debug("Found " + staticRouteProfiles.size() + " static routes to apply as a part of vpc route " + domainRouterVO + " start"); if (!staticRouteProfiles.isEmpty()) { _commandSetupHelper.createStaticRouteCommands(staticRouteProfiles, domainRouterVO, cmds); } @@ -464,7 +462,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.NetworkACL, Provider.VPCVirtualRouter)) { final List networkACLs = _networkACLMgr.listNetworkACLItems(guestNetworkId); if (networkACLs != null && !networkACLs.isEmpty()) { - s_logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for guest network id=" + guestNetworkId); + logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for guest network id=" + guestNetworkId); _commandSetupHelper.createNetworkACLsCommands(networkACLs, domainRouterVO, cmds, guestNetworkId, false); } } @@ -511,20 +509,20 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian try { if (_nwHelper.sendCommandsToRouter(router, cmds)) { - s_logger.debug("Successfully applied ip association for ip " + ip + " in vpc network " + network); + logger.debug("Successfully applied ip association for ip " + ip + " in vpc network " + network); return true; } else { - s_logger.warn("Failed to associate ip address " + ip + " in vpc network " + network); + logger.warn("Failed to associate ip address " + ip + " in vpc network " + network); return false; } } catch (final Exception ex) { - s_logger.warn("Failed to send " + (add ? 
"add " : "delete ") + " private network " + network + " commands to rotuer "); + logger.warn("Failed to send " + (add ? "add " : "delete ") + " private network " + network + " commands to rotuer "); return false; } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending setup private network command to the backend"); + logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending setup private network command to the backend"); } else { - s_logger.warn("Unable to setup private gateway, virtual router " + router + " is not in the right state " + router.getState()); + logger.warn("Unable to setup private gateway, virtual router " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to setup Private gateway on the backend," + " virtual router " + router + " is not in the right state", DataCenter.class, router.getDataCenterId()); @@ -536,28 +534,28 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian public boolean destroyPrivateGateway(final PrivateGateway gateway, final VirtualRouter router) throws ConcurrentOperationException, ResourceUnavailableException { if (!_networkModel.isVmPartOfNetwork(router.getId(), gateway.getNetworkId())) { - s_logger.debug("Router doesn't have nic for gateway " + gateway + " so no need to removed it"); + logger.debug("Router doesn't have nic for gateway " + gateway + " so no need to removed it"); return true; } final Network privateNetwork = _networkModel.getNetwork(gateway.getNetworkId()); - s_logger.debug("Releasing private ip for gateway " + gateway + " from " + router); + logger.debug("Releasing private ip for gateway " + gateway + " from " + router); boolean result = setupVpcPrivateNetwork(router, false, _networkModel.getNicProfile(router, privateNetwork.getId(), null)); 
if (!result) { - s_logger.warn("Failed to release private ip for gateway " + gateway + " on router " + router); + logger.warn("Failed to release private ip for gateway " + gateway + " on router " + router); return false; } // revoke network acl on the private gateway. if (!_networkACLMgr.revokeACLItemsForPrivateGw(gateway)) { - s_logger.debug("Failed to delete network acl items on " + gateway + " from router " + router); + logger.debug("Failed to delete network acl items on " + gateway + " from router " + router); return false; } - s_logger.debug("Removing router " + router + " from private network " + privateNetwork + " as a part of delete private gateway"); + logger.debug("Removing router " + router + " from private network " + privateNetwork + " as a part of delete private gateway"); result = result && _itMgr.removeVmFromNetwork(router, privateNetwork, null); - s_logger.debug("Private gateawy " + gateway + " is removed from router " + router); + logger.debug("Private gateawy " + gateway + " is removed from router " + router); return result; } @@ -574,7 +572,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian final ArrayList publicIps = getPublicIpsToApply(domainRouterVO, provider, guestNetworkId, IpAddress.State.Releasing); if (publicIps != null && !publicIps.isEmpty()) { - s_logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + domainRouterVO + " start."); + logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + domainRouterVO + " start."); // Re-apply public ip addresses - should come before PF/LB/VPN _commandSetupHelper.createVpcAssociatePublicIPCommands(domainRouterVO, publicIps, cmds, vlanMacAddress); } @@ -584,7 +582,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian @Override public boolean startSite2SiteVpn(final Site2SiteVpnConnection conn, final VirtualRouter router) throws ResourceUnavailableException { if (router.getState() != 
State.Running) { - s_logger.warn("Unable to apply site-to-site VPN configuration, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to apply site-to-site VPN configuration, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply site 2 site VPN configuration," + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); } @@ -595,7 +593,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian @Override public boolean stopSite2SiteVpn(final Site2SiteVpnConnection conn, final VirtualRouter router) throws ResourceUnavailableException { if (router.getState() != State.Running) { - s_logger.warn("Unable to apply site-to-site VPN configuration, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to apply site-to-site VPN configuration, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply site 2 site VPN configuration," + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); } @@ -629,7 +627,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian final Nic nic = _nicDao.findByIp4AddressAndNetworkIdAndInstanceId(publicNtwkId, router.getId(), ip.getAddress().addr()); if (nic != null) { nicsToUnplug.put(ip.getVlanTag(), ip); - s_logger.debug("Need to unplug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); + logger.debug("Need to unplug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); } } } @@ -652,14 +650,14 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian if (nic == null && nicsToPlug.get(ip.getVlanTag()) == null) { nicsToPlug.put(ip.getVlanTag(), ip); - s_logger.debug("Need to plug the nic for ip=" + ip + "; vlan=" + 
ip.getVlanTag() + " in public network id =" + publicNtwkId); + logger.debug("Need to plug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); } else { final PublicIpAddress nicToUnplug = nicsToUnplug.get(ip.getVlanTag()); if (nicToUnplug != null) { final NicVO nicVO = _nicDao.findByIp4AddressAndNetworkIdAndInstanceId(publicNtwkId, router.getId(), nicToUnplug.getAddress().addr()); nicVO.setIPv4Address(ip.getAddress().addr()); _nicDao.update(nicVO.getId(), nicVO); - s_logger.debug("Updated the nic " + nicVO + " with the new ip address " + ip.getAddress().addr()); + logger.debug("Updated the nic " + nicVO + " with the new ip address " + ip.getAddress().addr()); nicsToUnplug.remove(ip.getVlanTag()); } } @@ -690,7 +688,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian @Override public boolean startRemoteAccessVpn(final RemoteAccessVpn vpn, final VirtualRouter router) throws ResourceUnavailableException { if (router.getState() != State.Running) { - s_logger.warn("Unable to apply remote access VPN configuration, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to apply remote access VPN configuration, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply remote access VPN configuration," + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); } @@ -701,19 +699,19 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian try { _agentMgr.send(router.getHostId(), cmds); } catch (final OperationTimedoutException e) { - s_logger.debug("Failed to start remote access VPN: ", e); + logger.debug("Failed to start remote access VPN: ", e); throw new AgentUnavailableException("Unable to send commands to virtual router ", router.getHostId(), e); } Answer answer = cmds.getAnswer("users"); if (!answer.getResult()) { - s_logger.error("Unable to 
start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails()); throw new ResourceUnavailableException("Unable to start vpn: Unable to add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); } answer = cmds.getAnswer("startVpn"); if (!answer.getResult()) { - s_logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails()); throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); @@ -731,9 +729,9 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian _commandSetupHelper.createApplyVpnCommands(false, vpn, router, cmds); result = result && _nwHelper.sendCommandsToRouter(router, cmds); } else if (router.getState() == State.Stopped) { - s_logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); + logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); } else { - s_logger.warn("Failed to delete remote access VPN: domR " + router + " is not in right state " + router.getState()); + logger.warn("Failed to delete remote access VPN: 
domR " + router + " is not in right state " + router.getState()); throw new ResourceUnavailableException("Failed to delete remote access VPN: domR is not in right state " + router.getState(), DataCenter.class, router.getDataCenterId()); } diff --git a/server/src/com/cloud/network/rules/RulesManagerImpl.java b/server/src/com/cloud/network/rules/RulesManagerImpl.java index 3e2b15af22e..38cd27cc994 100644 --- a/server/src/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/com/cloud/network/rules/RulesManagerImpl.java @@ -28,7 +28,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.command.user.firewall.ListPortForwardingRulesCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.configuration.ConfigurationManager; import com.cloud.domain.dao.DomainDao; @@ -99,7 +98,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = {RulesManager.class, RulesService.class}) public class RulesManagerImpl extends ManagerBase implements RulesManager, RulesService { - private static final Logger s_logger = Logger.getLogger(RulesManagerImpl.class); @Inject IpAddressManager _ipAddrMgr; @@ -229,7 +227,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (assignToVpcNtwk) { _networkModel.checkIpForService(ipAddress, Service.PortForwarding, networkId); - s_logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); + logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); try { ipAddress = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); performedIpAssoc = true; @@ -499,15 +497,15 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (assignToVpcNtwk) { _networkModel.checkIpForService(ipAddress, Service.StaticNat, networkId); - s_logger.debug("The ip is not 
associated with the VPC network id=" + networkId + ", so assigning"); + logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); try { ipAddress = _ipAddrMgr.associateIPToGuestNetwork(ipId, networkId, false); } catch (Exception ex) { - s_logger.warn("Failed to associate ip id=" + ipId + " to VPC network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate ip id=" + ipId + " to VPC network id=" + networkId + " as " + "a part of enable static nat"); return false; } } else if (ipAddress.isPortable()) { - s_logger.info("Portable IP " + ipAddress.getUuid() + " is not associated with the network yet " + " so associate IP with the network " + + logger.info("Portable IP " + ipAddress.getUuid() + " is not associated with the network yet " + " so associate IP with the network " + networkId); try { // check if StaticNat service is enabled in the network @@ -521,7 +519,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules // associate portable IP with guest network ipAddress = _ipAddrMgr.associatePortableIPToGuestNetwork(ipId, networkId, false); } catch (Exception e) { - s_logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); return false; } } @@ -537,7 +535,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules _ipAddrMgr.transferPortableIP(ipId, ipAddress.getAssociatedWithNetworkId(), networkId); ipAddress = _ipAddressDao.findById(ipId); } catch (Exception e) { - s_logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); return 
false; } } else { @@ -596,19 +594,19 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules ipAddress.setVmIp(dstIp); if (_ipAddressDao.update(ipAddress.getId(), ipAddress)) { // enable static nat on the backend - s_logger.trace("Enabling static nat for ip address " + ipAddress + " and vm id=" + vmId + " on the backend"); + logger.trace("Enabling static nat for ip address " + ipAddress + " and vm id=" + vmId + " on the backend"); if (applyStaticNatForIp(ipId, false, caller, false)) { performedIpAssoc = false; // ignor unassignIPFromVpcNetwork in finally block return true; } else { - s_logger.warn("Failed to enable static nat rule for ip address " + ipId + " on the backend"); + logger.warn("Failed to enable static nat rule for ip address " + ipId + " on the backend"); ipAddress.setOneToOneNat(isOneToOneNat); ipAddress.setAssociatedWithVmId(associatedWithVmId); ipAddress.setVmIp(null); _ipAddressDao.update(ipAddress.getId(), ipAddress); } } else { - s_logger.warn("Failed to update ip address " + ipAddress + " in the DB as a part of enableStaticNat"); + logger.warn("Failed to update ip address " + ipAddress + " in the DB as a part of enableStaticNat"); } } finally { @@ -668,7 +666,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules oldIP.getUuid()); } // unassign old static nat rule - s_logger.debug("Disassociating static nat for ip " + oldIP); + logger.debug("Disassociating static nat for ip " + oldIP); if (!disableStaticNat(oldIP.getId(), caller, callerUserId, true)) { throw new CloudRuntimeException("Failed to disable old static nat rule for vm "+ vm.getInstanceName() + " with id "+vm.getUuid() +" and public ip " + oldIP); @@ -758,7 +756,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules Set ipsToReprogram = new HashSet(); if (rules == null || rules.isEmpty()) { - s_logger.debug("No port forwarding rules are found for vm id=" + vmId); + logger.debug("No port forwarding 
rules are found for vm id=" + vmId); return true; } @@ -770,9 +768,9 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules // apply rules for all ip addresses for (Long ipId : ipsToReprogram) { - s_logger.debug("Applying port forwarding rules for ip address id=" + ipId + " as a part of vm expunge"); + logger.debug("Applying port forwarding rules for ip address id=" + ipId + " as a part of vm expunge"); if (!applyPortForwardingRules(ipId, true, _accountMgr.getSystemAccount())) { - s_logger.warn("Failed to apply port forwarding rules for ip id=" + ipId); + logger.warn("Failed to apply port forwarding rules for ip id=" + ipId); success = false; } } @@ -866,7 +864,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules List rules = _portForwardingDao.listForApplication(ipId); if (rules.size() == 0) { - s_logger.debug("There are no port forwarding rules to apply for ip id=" + ipId); + logger.debug("There are no port forwarding rules to apply for ip id=" + ipId); return true; } @@ -879,7 +877,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply port forwarding rules for ip due to ", ex); + logger.warn("Failed to apply port forwarding rules for ip due to ", ex); return false; } @@ -891,7 +889,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules List staticNatRules = new ArrayList(); if (rules.size() == 0) { - s_logger.debug("There are no static nat rules to apply for ip id=" + sourceIpId); + logger.debug("There are no static nat rules to apply for ip id=" + sourceIpId); return true; } @@ -908,7 +906,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply static nat rules for ip due to ", ex); + logger.warn("Failed to apply static nat 
rules for ip due to ", ex); return false; } @@ -919,7 +917,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules public boolean applyPortForwardingRulesForNetwork(long networkId, boolean continueOnError, Account caller) { List rules = listByNetworkId(networkId); if (rules.size() == 0) { - s_logger.debug("There are no port forwarding rules to apply for network id=" + networkId); + logger.debug("There are no port forwarding rules to apply for network id=" + networkId); return true; } @@ -932,7 +930,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply port forwarding rules for network due to ", ex); + logger.warn("Failed to apply port forwarding rules for network due to ", ex); return false; } @@ -945,7 +943,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules List staticNatRules = new ArrayList(); if (rules.size() == 0) { - s_logger.debug("There are no static nat rules to apply for network id=" + networkId); + logger.debug("There are no static nat rules to apply for network id=" + networkId); return true; } @@ -962,7 +960,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply static nat rules for network due to ", ex); + logger.warn("Failed to apply static nat rules for network due to ", ex); return false; } @@ -973,7 +971,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules public boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller) { List ips = _ipAddressDao.listStaticNatPublicIps(networkId); if (ips.isEmpty()) { - s_logger.debug("There are no static nat to apply for network id=" + networkId); + logger.debug("There are no static nat to apply for network id=" + networkId); return true; } 
@@ -994,7 +992,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to create static nat for network due to ", ex); + logger.warn("Failed to create static nat for network due to ", ex); return false; } @@ -1078,8 +1076,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules List rules = new ArrayList(); List pfRules = _portForwardingDao.listByIpAndNotRevoked(ipId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + pfRules.size() + " port forwarding rules for ip id=" + ipId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + pfRules.size() + " port forwarding rules for ip id=" + ipId); } for (PortForwardingRuleVO rule : pfRules) { @@ -1088,8 +1086,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } List staticNatRules = _firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.StaticNat); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + staticNatRules.size() + " static nat rules for ip id=" + ipId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + staticNatRules.size() + " static nat rules for ip id=" + ipId); } for (FirewallRuleVO rule : staticNatRules) { @@ -1112,8 +1110,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules rules.addAll(_portForwardingDao.listByIpAndNotRevoked(ipId)); rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.StaticNat)); - if (s_logger.isDebugEnabled() && success) { - s_logger.debug("Successfully released rules for ip id=" + ipId + " and # of rules now = " + rules.size()); + if (logger.isDebugEnabled() && success) { + logger.debug("Successfully released rules for ip id=" + ipId + " and # of rules now = " + rules.size()); } return (rules.size() == 0 && success); @@ -1124,13 +1122,13 @@ public class RulesManagerImpl extends ManagerBase implements 
RulesManager, Rules List rules = new ArrayList(); List pfRules = _portForwardingDao.listByNetwork(networkId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId); } List staticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + staticNatRules.size() + " static nat rules for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + staticNatRules.size() + " static nat rules for network id=" + networkId); } // Mark all pf rules (Active and non-Active) to be revoked, but don't revoke it yet - pass apply=false @@ -1154,8 +1152,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules rules.addAll(_portForwardingDao.listByNetworkAndNotRevoked(networkId)); rules.addAll(_firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.StaticNat)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully released rules for network id=" + networkId + " and # of rules now = " + rules.size()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully released rules for network id=" + networkId + " and # of rules now = " + rules.size()); } return success && rules.size() == 0; @@ -1261,18 +1259,18 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules // Revoke all firewall rules for the ip try { - s_logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of disabling static nat for public IP id=" + ipId); + logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of disabling static nat for public IP id=" + ipId); if (!_firewallMgr.revokeFirewallRulesForIp(ipId, callerUserId, caller)) { - s_logger.warn("Unable to revoke all the firewall rules 
for ip id=" + ipId + " as a part of disable statis nat"); + logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of disable statis nat"); success = false; } } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); success = false; } if (!revokeAllPFAndStaticNatRulesForIp(ipId, callerUserId, caller)) { - s_logger.warn("Unable to revoke all static nat rules for ip " + ipAddress); + logger.warn("Unable to revoke all static nat rules for ip " + ipAddress); success = false; } @@ -1288,13 +1286,13 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules _vpcMgr.unassignIPFromVpcNetwork(ipAddress.getId(), networkId); if (isIpSystem && releaseIpIfElastic && !_ipAddrMgr.handleSystemIpRelease(ipAddress)) { - s_logger.warn("Failed to release system ip address " + ipAddress); + logger.warn("Failed to release system ip address " + ipAddress); success = false; } return true; } else { - s_logger.warn("Failed to disable one to one nat for the ip address id" + ipId); + logger.warn("Failed to disable one to one nat for the ip address id" + ipId); return false; } } @@ -1331,7 +1329,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to create static nat rule due to ", ex); + logger.warn("Failed to create static nat rule due to ", ex); return false; } } @@ -1350,18 +1348,18 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (staticNats != null && !staticNats.isEmpty()) { if (forRevoke) { - s_logger.debug("Found " + staticNats.size() + " static nats to disable for network id " + networkId); + logger.debug("Found " + staticNats.size() + " static nats to disable for network id " + 
networkId); } try { if (!_ipAddrMgr.applyStaticNats(staticNats, continueOnError, forRevoke)) { return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to create static nat rule due to ", ex); + logger.warn("Failed to create static nat rule due to ", ex); return false; } } else { - s_logger.debug("Found 0 static nat rules to apply for network id " + networkId); + logger.debug("Found 0 static nat rules to apply for network id " + networkId); } return true; @@ -1370,7 +1368,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules protected List createStaticNatForIp(IpAddress sourceIp, Account caller, boolean forRevoke) { List staticNats = new ArrayList(); if (!sourceIp.isOneToOneNat()) { - s_logger.debug("Source ip id=" + sourceIp + " is not one to one nat"); + logger.debug("Source ip id=" + sourceIp + " is not one to one nat"); return staticNats; } @@ -1434,36 +1432,36 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } // check if there is already static nat enabled if (_ipAddressDao.findByAssociatedVmId(vm.getId()) != null && !getNewIp) { - s_logger.debug("Vm " + vm + " already has ip associated with it in guest network " + guestNetwork); + logger.debug("Vm " + vm + " already has ip associated with it in guest network " + guestNetwork); continue; } - s_logger.debug("Allocating system ip and enabling static nat for it for the vm " + vm + " in guest network " + guestNetwork); + logger.debug("Allocating system ip and enabling static nat for it for the vm " + vm + " in guest network " + guestNetwork); IpAddress ip = _ipAddrMgr.assignSystemIp(guestNetwork.getId(), _accountMgr.getAccount(vm.getAccountId()), false, true); if (ip == null) { throw new CloudRuntimeException("Failed to allocate system ip for vm " + vm + " in guest network " + guestNetwork); } - s_logger.debug("Allocated system ip " + ip + ", now enabling static nat on it for vm " + vm); + logger.debug("Allocated system 
ip " + ip + ", now enabling static nat on it for vm " + vm); try { success = enableStaticNat(ip.getId(), vm.getId(), guestNetwork.getId(), isSystemVM, null); } catch (NetworkRuleConflictException ex) { - s_logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " + guestNetwork + + logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " + guestNetwork + " due to exception ", ex); success = false; } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " + guestNetwork + + logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " + guestNetwork + " due to exception ", ex); success = false; } if (!success) { - s_logger.warn("Failed to enable static nat on system ip " + ip + " for the vm " + vm + ", releasing the ip..."); + logger.warn("Failed to enable static nat on system ip " + ip + " for the vm " + vm + ", releasing the ip..."); _ipAddrMgr.handleSystemIpRelease(ip); throw new CloudRuntimeException("Failed to enable static nat on system ip for the vm " + vm); } else { - s_logger.warn("Succesfully enabled static nat on system ip " + ip + " for the vm " + vm); + logger.warn("Succesfully enabled static nat on system ip " + ip + " for the vm " + vm); } } } @@ -1475,19 +1473,19 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules @Override public List listAssociatedRulesForGuestNic(Nic nic) { - s_logger.debug("Checking if PF/StaticNat/LoadBalancer rules are configured for nic " + nic.getId()); + logger.debug("Checking if PF/StaticNat/LoadBalancer rules are configured for nic " + nic.getId()); List result = new ArrayList(); // add PF rules result.addAll(_portForwardingDao.listByNetworkAndDestIpAddr(nic.getIPv4Address(), 
nic.getNetworkId())); if(result.size() > 0) { - s_logger.debug("Found " + result.size() + " portforwarding rule configured for the nic in the network " + nic.getNetworkId()); + logger.debug("Found " + result.size() + " portforwarding rule configured for the nic in the network " + nic.getNetworkId()); } // add static NAT rules List staticNatRules = _firewallDao.listStaticNatByVmId(nic.getInstanceId()); for (FirewallRuleVO rule : staticNatRules) { if (rule.getNetworkId() == nic.getNetworkId()) { result.add(rule); - s_logger.debug("Found rule " + rule.getId() + " " + rule.getPurpose() + " configured"); + logger.debug("Found rule " + rule.getId() + " " + rule.getPurpose() + " configured"); } } List staticNatIps = _ipAddressDao.listStaticNatPublicIps(nic.getNetworkId()); @@ -1500,7 +1498,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules new FirewallRuleVO(null, ip.getId(), 0, 65535, NetUtils.ALL_PROTO.toString(), nic.getNetworkId(), vm.getAccountId(), vm.getDomainId(), Purpose.StaticNat, null, null, null, null, null); result.add(staticNatRule); - s_logger.debug("Found rule " + staticNatRule.getId() + " " + staticNatRule.getPurpose() + " configured"); + logger.debug("Found rule " + staticNatRule.getId() + " " + staticNatRule.getPurpose() + " configured"); } } // add LB rules @@ -1509,7 +1507,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules FirewallRuleVO lbRule = _firewallDao.findById(lb.getLoadBalancerId()); if (lbRule.getNetworkId() == nic.getNetworkId()) { result.add(lbRule); - s_logger.debug("Found rule " + lbRule.getId() + " " + lbRule.getPurpose() + " configured"); + logger.debug("Found rule " + lbRule.getId() + " " + lbRule.getPurpose() + " configured"); } } return result; diff --git a/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java b/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java index 47f6e774bac..434a8e422ad 100644 --- 
a/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java +++ b/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java @@ -77,7 +77,7 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { } }); } catch (final Throwable th) { - s_logger.error("SG Work: Caught this throwable, ", th); + logger.error("SG Work: Caught this throwable, ", th); } } } @@ -99,15 +99,15 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { return; } if (_schedulerDisabled) { - s_logger.debug("Security Group Mgr v2: scheduler disabled, doing nothing for " + affectedVms.size() + " vms"); + logger.debug("Security Group Mgr v2: scheduler disabled, doing nothing for " + affectedVms.size() + " vms"); return; } Set workItems = new TreeSet(); workItems.addAll(affectedVms); workItems.removeAll(_disabledVms); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Security Group Mgr v2: scheduling ruleset updates for " + affectedVms.size() + " vms " + " (unique=" + workItems.size() + + if (logger.isDebugEnabled()) { + logger.debug("Security Group Mgr v2: scheduling ruleset updates for " + affectedVms.size() + " vms " + " (unique=" + workItems.size() + "), current queue size=" + _workQueue.size()); } @@ -123,8 +123,8 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { int newJobs = _workQueue.submitWorkForVms(workItems); _mBean.logScheduledDetails(workItems); p.stop(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Security Group Mgr v2: done scheduling ruleset updates for " + workItems.size() + " vms: num new jobs=" + newJobs + + if (logger.isDebugEnabled()) { + logger.debug("Security Group Mgr v2: done scheduling ruleset updates for " + workItems.size() + " vms: num new jobs=" + newJobs + " num rows insert or updated=" + updated + " time taken=" + p.getDurationInMillis()); } } @@ -139,31 +139,31 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { @Override public void work() { 
- s_logger.trace("Checking the work queue"); + logger.trace("Checking the work queue"); List workItems; try { workItems = _workQueue.getWork(1); for (SecurityGroupWork work : workItems) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Processing " + work.getInstanceId()); + if (logger.isTraceEnabled()) { + logger.trace("Processing " + work.getInstanceId()); } try { VmRulesetLogVO rulesetLog = _rulesetLogDao.findByVmId(work.getInstanceId()); if (rulesetLog == null) { - s_logger.warn("Could not find ruleset log for vm " + work.getInstanceId()); + logger.warn("Could not find ruleset log for vm " + work.getInstanceId()); continue; } work.setLogsequenceNumber(rulesetLog.getLogsequence()); sendRulesetUpdates(work); _mBean.logUpdateDetails(work.getInstanceId(), work.getLogsequenceNumber()); } catch (Exception e) { - s_logger.error("Problem during SG work " + work, e); + logger.error("Problem during SG work " + work, e); work.setStep(Step.Error); } } } catch (InterruptedException e1) { - s_logger.warn("SG work: caught InterruptException", e1); + logger.warn("SG work: caught InterruptException", e1); } } @@ -172,8 +172,8 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { UserVm vm = _userVMDao.findById(userVmId); if (vm != null && vm.getState() == State.Running) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("SecurityGroupManager v2: found vm, " + userVmId + " state=" + vm.getState()); + if (logger.isTraceEnabled()) { + logger.trace("SecurityGroupManager v2: found vm, " + userVmId + " state=" + vm.getState()); } Map> ingressRules = generateRulesForVM(userVmId, SecurityRuleType.IngressRule); Map> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule); @@ -193,28 +193,28 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { generateRulesetCmd(vm.getInstanceName(), vm.getPrivateIpAddress(), vm.getPrivateMacAddress(), vm.getId(), null, work.getLogsequenceNumber(), ingressRules, egressRules, 
nicSecIps); cmd.setMsId(_serverId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("SecurityGroupManager v2: sending ruleset update for vm " + vm.getInstanceName() + ":ingress num rules=" + + if (logger.isDebugEnabled()) { + logger.debug("SecurityGroupManager v2: sending ruleset update for vm " + vm.getInstanceName() + ":ingress num rules=" + cmd.getIngressRuleSet().length + ":egress num rules=" + cmd.getEgressRuleSet().length + " num cidrs=" + cmd.getTotalNumCidrs() + " sig=" + cmd.getSignature()); } Commands cmds = new Commands(cmd); try { _agentMgr.send(agentId, cmds, _answerListener); - if (s_logger.isTraceEnabled()) { - s_logger.trace("SecurityGroupManager v2: sent ruleset updates for " + vm.getInstanceName() + " curr queue size=" + _workQueue.size()); + if (logger.isTraceEnabled()) { + logger.trace("SecurityGroupManager v2: sent ruleset updates for " + vm.getInstanceName() + " curr queue size=" + _workQueue.size()); } } catch (AgentUnavailableException e) { - s_logger.debug("Unable to send updates for vm: " + userVmId + "(agentid=" + agentId + ")"); + logger.debug("Unable to send updates for vm: " + userVmId + "(agentid=" + agentId + ")"); _workTracker.handleException(agentId); } } } else { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (vm != null) - s_logger.debug("No rules sent to vm " + vm + "state=" + vm.getState()); + logger.debug("No rules sent to vm " + vm + "state=" + vm.getState()); else - s_logger.debug("Could not find vm: No rules sent to vm " + userVmId); + logger.debug("Could not find vm: No rules sent to vm " + userVmId); } } } @@ -279,7 +279,7 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { try { JmxUtil.registerMBean("SecurityGroupManager", "SecurityGroupManagerImpl2", _mBean); } catch (Exception e) { - s_logger.error("Failed to register MBean", e); + logger.error("Failed to register MBean", e); } boolean result = super.configure(name, params); Map configs = 
_configDao.getConfiguration("Network", params); @@ -295,7 +295,7 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { } else { _disabledVms.remove(vmId); } - s_logger.warn("JMX operation: Scheduler state for vm " + vmId + ": new state disabled=" + disable); + logger.warn("JMX operation: Scheduler state for vm " + vmId + ": new state disabled=" + disable); } @@ -305,13 +305,13 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { } public void enableAllVmsForScheduler() { - s_logger.warn("Cleared list of disabled VMs (JMX operation?)"); + logger.warn("Cleared list of disabled VMs (JMX operation?)"); _disabledVms.clear(); } public void disableScheduler(boolean disable) { _schedulerDisabled = disable; - s_logger.warn("JMX operation: Scheduler state changed: new state disabled=" + disable); + logger.warn("JMX operation: Scheduler state changed: new state disabled=" + disable); } public boolean isSchedulerDisabled() { @@ -320,7 +320,7 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { public void clearWorkQueue() { _workQueue.clear(); - s_logger.warn("Cleared the work queue (possible JMX operation)"); + logger.warn("Cleared the work queue (possible JMX operation)"); } } diff --git a/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java b/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java index de0465b8e76..d3db49ac6ef 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.log4j.Logger; import com.cloud.configuration.ConfigurationManager; import com.cloud.event.ActionEvent; @@ -55,7 +54,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Local(value = 
{NetworkACLManager.class}) public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLManager { - private static final Logger s_logger = Logger.getLogger(NetworkACLManagerImpl.class); @Inject AccountManager _accountMgr; @@ -117,7 +115,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana if (!applyACLToPrivateGw(privateGateway)) { aclApplyStatus = false; - s_logger.debug("failed to apply network acl item on private gateway " + privateGateway.getId() + "acl id " + aclId); + logger.debug("failed to apply network acl item on private gateway " + privateGateway.getId() + "acl id " + aclId); break; } } @@ -169,7 +167,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana if (aclItems == null || aclItems.isEmpty()) { //Revoke ACL Items of the existing ACL if the new network acl is empty //Other wise existing rules will not be removed on the router elelment - s_logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); + logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); if (!revokeACLItemsForPrivateGw(gateway)) { throw new CloudRuntimeException("Failed to replace network ACL. Error while removing existing ACL " + "items for privatewa gateway: " + gateway.getId()); } @@ -202,7 +200,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana //Existing rules won't be removed otherwise List aclItems = _networkACLItemDao.listByACL(acl.getId()); if (aclItems == null || aclItems.isEmpty()) { - s_logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); + logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); if (!revokeACLItemsForNetwork(network.getId())) { throw new CloudRuntimeException("Failed to replace network ACL. 
Error while removing existing ACL items for network: " + network.getId()); } @@ -212,7 +210,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana network.setNetworkACLId(acl.getId()); //Update Network ACL if (_networkDao.update(network.getId(), network)) { - s_logger.debug("Updated network: " + network.getId() + " with Network ACL Id: " + acl.getId() + ", Applying ACL items"); + logger.debug("Updated network: " + network.getId() + " with Network ACL Id: " + acl.getId() + ", Applying ACL items"); //Apply ACL to network Boolean result = applyACLToNetwork(network.getId()); if (result) { @@ -292,8 +290,8 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana @DB private void revokeRule(NetworkACLItemVO rule) { if (rule.getState() == State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a rule that is still in stage state so just removing it: " + rule); + if (logger.isDebugEnabled()) { + logger.debug("Found a rule that is still in stage state so just removing it: " + rule); } _networkACLItemDao.remove(rule.getId()); } else if (rule.getState() == State.Add || rule.getState() == State.Active) { @@ -310,12 +308,12 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana } List aclItems = _networkACLItemDao.listByACL(network.getNetworkACLId()); if (aclItems.isEmpty()) { - s_logger.debug("Found no network ACL Items for network id=" + networkId); + logger.debug("Found no network ACL Items for network id=" + networkId); return true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + aclItems.size() + " Network ACL Items for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + aclItems.size() + " Network ACL Items for network id=" + networkId); } for (NetworkACLItemVO aclItem : aclItems) { @@ -327,8 +325,8 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana boolean success = 
applyACLItemsToNetwork(network.getId(), aclItems); - if (s_logger.isDebugEnabled() && success) { - s_logger.debug("Successfully released Network ACLs for network id=" + networkId + " and # of rules now = " + aclItems.size()); + if (logger.isDebugEnabled() && success) { + logger.debug("Successfully released Network ACLs for network id=" + networkId + " and # of rules now = " + aclItems.size()); } return success; @@ -339,12 +337,12 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana List aclItems = _networkACLItemDao.listByACL(gateway.getNetworkACLId()); if (aclItems.isEmpty()) { - s_logger.debug("Found no network ACL Items for private gateway id=" + gateway.getId()); + logger.debug("Found no network ACL Items for private gateway id=" + gateway.getId()); return true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + aclItems.size() + " Network ACL Items for private gateway id=" + gateway.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + aclItems.size() + " Network ACL Items for private gateway id=" + gateway.getId()); } for (NetworkACLItemVO aclItem : aclItems) { @@ -356,8 +354,8 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana boolean success = applyACLToPrivateGw(gateway, aclItems); - if (s_logger.isDebugEnabled() && success) { - s_logger.debug("Successfully released Network ACLs for private gateway id=" + gateway.getId() + " and # of rules now = " + aclItems.size()); + if (logger.isDebugEnabled() && success) { + logger.debug("Successfully released Network ACLs for private gateway id=" + gateway.getId() + " and # of rules now = " + aclItems.size()); } return success; @@ -398,7 +396,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana return provider.applyACLItemsToPrivateGw(gateway, rules); } } catch(Exception ex) { - s_logger.debug("Failed to apply acl to private gateway " + gateway); + logger.debug("Failed to apply acl to 
private gateway " + gateway); } return false; } @@ -488,7 +486,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana continue; } foundProvider = true; - s_logger.debug("Applying NetworkACL for network: " + network.getId() + " with Network ACL service provider"); + logger.debug("Applying NetworkACL for network: " + network.getId() + " with Network ACL service provider"); handled = element.applyNetworkACLs(network, rules); if (handled) { // publish message on message bus, so that network elements implementing distributed routing @@ -498,7 +496,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana } } if (!foundProvider) { - s_logger.debug("Unable to find NetworkACL service provider for network: " + network.getId()); + logger.debug("Unable to find NetworkACL service provider for network: " + network.getId()); } return handled; } diff --git a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java index 9f61f08c4cb..b146595510f 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -24,7 +24,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiErrorCode; @@ -67,7 +66,6 @@ import com.cloud.utils.net.NetUtils; @Component @Local(value = {NetworkACLService.class}) public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLService { - private static final Logger s_logger = Logger.getLogger(NetworkACLServiceImpl.class); @Inject AccountManager _accountMgr; @@ -324,7 +322,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ if (aclId == null) { //Network is not associated with any ACL. 
Create a new ACL and add aclItem in it for backward compatibility - s_logger.debug("Network " + network.getId() + " is not associated with any ACL. Creating an ACL before adding acl item"); + logger.debug("Network " + network.getId() + " is not associated with any ACL. Creating an ACL before adding acl item"); //verify that ACLProvider is supported by network offering if (!_networkModel.areServicesSupportedByNetworkOffering(network.getNetworkOfferingId(), Network.Service.NetworkACL)) { @@ -343,14 +341,14 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ if (acl == null) { throw new CloudRuntimeException("Error while create ACL before adding ACL Item for network " + network.getId()); } - s_logger.debug("Created ACL: " + aclName + " for network " + network.getId()); + logger.debug("Created ACL: " + aclName + " for network " + network.getId()); aclId = acl.getId(); //Apply acl to network try { if (!_networkAclMgr.replaceNetworkACL(acl, (NetworkVO)network)) { throw new CloudRuntimeException("Unable to apply auto created ACL to network " + network.getId()); } - s_logger.debug("Created ACL is applied to network " + network.getId()); + logger.debug("Created ACL is applied to network " + network.getId()); } catch (ResourceUnavailableException e) { throw new CloudRuntimeException("Unable to apply auto created ACL to network " + network.getId(), e); } diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java index 2a078954bde..04d0fea7864 100644 --- a/server/src/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java @@ -48,7 +48,6 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationSe import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import 
org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; @@ -145,7 +144,6 @@ import com.cloud.vm.dao.DomainRouterDao; @Local(value = {VpcManager.class, VpcService.class, VpcProvisioningService.class}) public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvisioningService, VpcService { - private static final Logger s_logger = Logger.getLogger(VpcManagerImpl.class); public static final String SERVICE = "service"; public static final String CAPABILITYTYPE = "capabilitytype"; @@ -252,7 +250,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis public void doInTransactionWithoutResult(final TransactionStatus status) { if (_vpcOffDao.findByUniqueName(VpcOffering.defaultVPCOfferingName) == null) { - s_logger.debug("Creating default VPC offering " + VpcOffering.defaultVPCOfferingName); + logger.debug("Creating default VPC offering " + VpcOffering.defaultVPCOfferingName); final Map> svcProviderMap = new HashMap>(); final Set defaultProviders = new HashSet(); @@ -273,7 +271,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis //configure default vpc offering with Netscaler as LB Provider if (_vpcOffDao.findByUniqueName(VpcOffering.defaultVPCNSOfferingName) == null) { - s_logger.debug("Creating default VPC offering with Netscaler as LB Provider" + VpcOffering.defaultVPCNSOfferingName); + logger.debug("Creating default VPC offering with Netscaler as LB Provider" + VpcOffering.defaultVPCNSOfferingName); final Map> svcProviderMap = new HashMap>(); final Set defaultProviders = new HashSet(); defaultProviders.add(Provider.VPCVirtualRouter); @@ -293,7 +291,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } if (_vpcOffDao.findByUniqueName(VpcOffering.redundantVPCOfferingName) == null) { - s_logger.debug("Creating Redundant VPC offering " + VpcOffering.redundantVPCOfferingName); + 
logger.debug("Creating Redundant VPC offering " + VpcOffering.redundantVPCOfferingName); final Map> svcProviderMap = new HashMap>(); final Set defaultProviders = new HashSet(); @@ -383,7 +381,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } if (service == Service.Connectivity) { - s_logger.debug("Applying Connectivity workaround, setting provider to NiciraNvp"); + logger.debug("Applying Connectivity workaround, setting provider to NiciraNvp"); svcProviderMap.put(service, sdnProviders); } else { svcProviderMap.put(service, defaultProviders); @@ -398,12 +396,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } if (!sourceNatSvc) { - s_logger.debug("Automatically adding source nat service to the list of VPC services"); + logger.debug("Automatically adding source nat service to the list of VPC services"); svcProviderMap.put(Service.SourceNat, defaultProviders); } if (!firewallSvs) { - s_logger.debug("Automatically adding network ACL service to the list of VPC services"); + logger.debug("Automatically adding network ACL service to the list of VPC services"); svcProviderMap.put(Service.NetworkACL, defaultProviders); } @@ -464,7 +462,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (state != null) { offering.setState(state); } - s_logger.debug("Adding vpc offering " + offering); + logger.debug("Adding vpc offering " + offering); offering = _vpcOffDao.persist(offering); // populate services and providers if (svcProviderMap != null) { @@ -474,7 +472,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis for (final Network.Provider provider : providers) { final VpcOfferingServiceMapVO offService = new VpcOfferingServiceMapVO(offering.getId(), service, provider); _vpcOffSvcMapDao.persist(offService); - s_logger.trace("Added service for the vpc offering: " + offService + " with provider " + provider.getName()); + logger.trace("Added 
service for the vpc offering: " + offService + " with provider " + provider.getName()); } } else { throw new InvalidParameterValueException("Provider is missing for the VPC offering service " + service.getName()); @@ -754,7 +752,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } if (_vpcOffDao.update(vpcOffId, offering)) { - s_logger.debug("Updated VPC offeirng id=" + vpcOffId); + logger.debug("Updated VPC offeirng id=" + vpcOffId); return _vpcOffDao.findById(vpcOffId); } else { return null; @@ -850,7 +848,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis final VpcVO persistedVpc = _vpcDao.persist(vpc, finalizeServicesAndProvidersForVpc(vpc.getZoneId(), vpc.getVpcOfferingId())); _resourceLimitMgr.incrementResourceCount(vpc.getAccountId(), ResourceType.vpc); - s_logger.debug("Created VPC " + persistedVpc); + logger.debug("Created VPC " + persistedVpc); return persistedVpc; } @@ -908,7 +906,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override @DB public boolean destroyVpc(final Vpc vpc, final Account caller, final Long callerUserId) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Destroying vpc " + vpc); + logger.debug("Destroying vpc " + vpc); //don't allow to delete vpc if it's in use by existing non system networks (system networks are networks of a private gateway of the VPC, //and they will get removed as a part of VPC cleanup @@ -919,7 +917,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis //mark VPC as inactive if (vpc.getState() != Vpc.State.Inactive) { - s_logger.debug("Updating VPC " + vpc + " with state " + Vpc.State.Inactive + " as a part of vpc delete"); + logger.debug("Updating VPC " + vpc + " with state " + Vpc.State.Inactive + " as a part of vpc delete"); final VpcVO vpcVO = _vpcDao.findById(vpc.getId()); vpcVO.setState(Vpc.State.Inactive); @@ -936,22 +934,22 @@ 
public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis //shutdown VPC if (!shutdownVpc(vpc.getId())) { - s_logger.warn("Failed to shutdown vpc " + vpc + " as a part of vpc destroy process"); + logger.warn("Failed to shutdown vpc " + vpc + " as a part of vpc destroy process"); return false; } //cleanup vpc resources if (!cleanupVpcResources(vpc.getId(), caller, callerUserId)) { - s_logger.warn("Failed to cleanup resources for vpc " + vpc); + logger.warn("Failed to cleanup resources for vpc " + vpc); return false; } //update the instance with removed flag only when the cleanup is executed successfully if (_vpcDao.remove(vpc.getId())) { - s_logger.debug("Vpc " + vpc + " is destroyed succesfully"); + logger.debug("Vpc " + vpc + " is destroyed succesfully"); return true; } else { - s_logger.warn("Vpc " + vpc + " failed to destroy"); + logger.warn("Vpc " + vpc + " failed to destroy"); return false; } } @@ -989,7 +987,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } if (_vpcDao.update(vpcId, vpc)) { - s_logger.debug("Updated VPC id=" + vpcId); + logger.debug("Updated VPC id=" + vpcId); return _vpcDao.findById(vpcId); } else { return null; @@ -1178,20 +1176,20 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis boolean result = true; try { if (!startVpc(vpc, dest, context)) { - s_logger.warn("Failed to start vpc " + vpc); + logger.warn("Failed to start vpc " + vpc); result = false; } } catch (final Exception ex) { - s_logger.warn("Failed to start vpc " + vpc + " due to ", ex); + logger.warn("Failed to start vpc " + vpc + " due to ", ex); result = false; } finally { //do cleanup if (!result && destroyOnFailure) { - s_logger.debug("Destroying vpc " + vpc + " that failed to start"); + logger.debug("Destroying vpc " + vpc + " that failed to start"); if (destroyVpc(vpc, caller, callerUser.getId())) { - s_logger.warn("Successfully destroyed vpc " + vpc + " that failed to start"); + 
logger.warn("Successfully destroyed vpc " + vpc + " that failed to start"); } else { - s_logger.warn("Failed to destroy vpc " + vpc + " that failed to start"); + logger.warn("Failed to destroy vpc " + vpc + " that failed to start"); } } } @@ -1206,9 +1204,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis for (final VpcProvider element : getVpcElements()) { if (providersToImplement.contains(element.getProvider())) { if (element.implementVpc(vpc, dest, context)) { - s_logger.debug("Vpc " + vpc + " has started succesfully"); + logger.debug("Vpc " + vpc + " has started succesfully"); } else { - s_logger.warn("Vpc " + vpc + " failed to start"); + logger.warn("Vpc " + vpc + " failed to start"); success = false; } } @@ -1231,7 +1229,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis _accountMgr.checkAccess(caller, null, false, vpc); //shutdown provider - s_logger.debug("Shutting down vpc " + vpc); + logger.debug("Shutting down vpc " + vpc); //TODO - shutdown all vpc resources here (ACLs, gateways, etc) boolean success = true; @@ -1240,9 +1238,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis for (final VpcProvider element : getVpcElements()) { if (providersToImplement.contains(element.getProvider())) { if (element.shutdownVpc(vpc, context)) { - s_logger.debug("Vpc " + vpc + " has been shutdown succesfully"); + logger.debug("Vpc " + vpc + " has been shutdown succesfully"); } else { - s_logger.warn("Vpc " + vpc + " failed to shutdown"); + logger.warn("Vpc " + vpc + " failed to shutdown"); success = false; } } @@ -1396,7 +1394,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis throw new InvalidParameterValueException("Invalid gateway specified. 
It should never be equal to the cidr subnet value"); } } finally { - s_logger.debug("Releasing lock for " + locked); + logger.debug("Releasing lock for " + locked); _vpcDao.releaseFromLockTable(locked.getId()); } } @@ -1425,18 +1423,18 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } public boolean cleanupVpcResources(final long vpcId, final Account caller, final long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { - s_logger.debug("Cleaning up resources for vpc id=" + vpcId); + logger.debug("Cleaning up resources for vpc id=" + vpcId); boolean success = true; //1) Remove VPN connections and VPN gateway - s_logger.debug("Cleaning up existed site to site VPN connections"); + logger.debug("Cleaning up existed site to site VPN connections"); _s2sVpnMgr.cleanupVpnConnectionByVpc(vpcId); - s_logger.debug("Cleaning up existed site to site VPN gateways"); + logger.debug("Cleaning up existed site to site VPN gateways"); _s2sVpnMgr.cleanupVpnGatewayByVpc(vpcId); //2) release all ip addresses final List ipsToRelease = _ipAddressDao.listByAssociatedVpc(vpcId, null); - s_logger.debug("Releasing ips for vpc id=" + vpcId + " as a part of vpc cleanup"); + logger.debug("Releasing ips for vpc id=" + vpcId + " as a part of vpc cleanup"); for (final IPAddressVO ipToRelease : ipsToRelease) { if (ipToRelease.isPortable()) { // portable IP address are associated with owner, until explicitly requested to be disassociated. 
@@ -1444,25 +1442,25 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis ipToRelease.setVpcId(null); ipToRelease.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipToRelease.getId(), ipToRelease); - s_logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any VPC"); + logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any VPC"); } else { success = success && _ipAddrMgr.disassociatePublicIpAddress(ipToRelease.getId(), callerUserId, caller); if (!success) { - s_logger.warn("Failed to cleanup ip " + ipToRelease + " as a part of vpc id=" + vpcId + " cleanup"); + logger.warn("Failed to cleanup ip " + ipToRelease + " as a part of vpc id=" + vpcId + " cleanup"); } } } if (success) { - s_logger.debug("Released ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); + logger.debug("Released ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); } else { - s_logger.warn("Failed to release ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); + logger.warn("Failed to release ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); //although it failed, proceed to the next cleanup step as it doesn't depend on the public ip release } //3) Delete all static route rules if (!revokeStaticRoutesForVpc(vpcId, caller)) { - s_logger.warn("Failed to revoke static routes for vpc " + vpcId + " as a part of cleanup vpc process"); + logger.warn("Failed to revoke static routes for vpc " + vpcId + " as a part of cleanup vpc process"); return false; } @@ -1471,12 +1469,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (gateways != null) { for (final PrivateGateway gateway : gateways) { if (gateway != null) { - s_logger.debug("Deleting private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Deleting private gateway " + gateway + " as a part 
of vpc " + vpcId + " resources cleanup"); if (!deleteVpcPrivateGateway(gateway.getId())) { success = false; - s_logger.debug("Failed to delete private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Failed to delete private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); } else { - s_logger.debug("Deleted private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Deleted private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); } } } @@ -1502,7 +1500,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis _accountMgr.checkAccess(caller, null, false, vpc); - s_logger.debug("Restarting VPC " + vpc); + logger.debug("Restarting VPC " + vpc); boolean restartRequired = false; try { @@ -1522,26 +1520,26 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } if (forceCleanup) { - s_logger.debug("Shutting down VPC " + vpc + " as a part of VPC restart process"); + logger.debug("Shutting down VPC " + vpc + " as a part of VPC restart process"); if (!shutdownVpc(vpcId)) { - s_logger.warn("Failed to shutdown vpc as a part of VPC " + vpc + " restart process"); + logger.warn("Failed to shutdown vpc as a part of VPC " + vpc + " restart process"); restartRequired = true; return false; } } else { - s_logger.info("Will not shutdown vpc as a part of VPC " + vpc + " restart process."); + logger.info("Will not shutdown vpc as a part of VPC " + vpc + " restart process."); } - s_logger.debug("Starting VPC " + vpc + " as a part of VPC restart process"); + logger.debug("Starting VPC " + vpc + " as a part of VPC restart process"); if (!startVpc(vpcId, false)) { - s_logger.warn("Failed to start vpc as a part of VPC " + vpc + " restart process"); + logger.warn("Failed to start vpc as a part of VPC " + vpc + " restart process"); restartRequired = true; return false; } - s_logger.debug("VPC " + vpc + 
" was restarted successfully"); + logger.debug("VPC " + vpc + " was restarted successfully"); return true; } finally { - s_logger.debug("Updating VPC " + vpc + " with restartRequired=" + restartRequired); + logger.debug("Updating VPC " + vpc + " with restartRequired=" + restartRequired); final VpcVO vo = _vpcDao.findById(vpcId); vo.setRestartRequired(restartRequired); _vpcDao.update(vpc.getId(), vo); @@ -1617,7 +1615,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override public VpcGatewayVO doInTransaction(final TransactionStatus status) throws ResourceAllocationException, ConcurrentOperationException, InsufficientCapacityException { - s_logger.debug("Creating Private gateway for VPC " + vpc); + logger.debug("Creating Private gateway for VPC " + vpc); //1) create private network unless it is existing and lswitch'd Network privateNtwk = null; if (BroadcastDomainType.getSchemeValue(BroadcastDomainType.fromString(broadcastUri)) == BroadcastDomainType.Lswitch) { @@ -1626,13 +1624,13 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // if the dcid is different we get no network so next we try to create it } if (privateNtwk == null) { - s_logger.info("creating new network for vpc " + vpc + " using broadcast uri: " + broadcastUri); + logger.info("creating new network for vpc " + vpc + " using broadcast uri: " + broadcastUri); final String networkName = "vpc-" + vpc.getName() + "-privateNetwork"; privateNtwk = _ntwkSvc.createPrivateNetwork(networkName, networkName, physicalNetworkIdFinal, broadcastUri, ipAddress, null, gateway, netmask, gatewayOwnerId, vpcId, isSourceNat, networkOfferingId); } else { // create the nic/ip as createPrivateNetwork doesn''t do that work for us now - s_logger.info("found and using existing network for vpc " + vpc + ": " + broadcastUri); + logger.info("found and using existing network for vpc " + vpc + ": " + broadcastUri); final DataCenterVO dc = 
_dcDao.lockRow(physNetFinal.getDataCenterId(), true); //add entry to private_ip_address table @@ -1646,7 +1644,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis final Long nextMac = mac + 1; dc.setMacAddress(nextMac); - s_logger.info("creating private ip adress for vpc (" + ipAddress + ", " + privateNtwk.getId() + ", " + nextMac + ", " + vpcId + ", " + isSourceNat + ")"); + logger.info("creating private ip adress for vpc (" + ipAddress + ", " + privateNtwk.getId() + ", " + nextMac + ", " + vpcId + ", " + isSourceNat + ")"); privateIp = new PrivateIpVO(ipAddress, privateNtwk.getId(), nextMac, vpcId, isSourceNat); _privateIpDao.persist(privateIp); @@ -1684,7 +1682,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis vpc.getAccountId(), vpc.getDomainId(), isSourceNat, networkAclId); _vpcGatewayDao.persist(gatewayVO); - s_logger.debug("Created vpc gateway entry " + gatewayVO); + logger.debug("Created vpc gateway entry " + gatewayVO); return gatewayVO; } @@ -1718,28 +1716,28 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } } if (success) { - s_logger.debug("Private gateway " + gateway + " was applied succesfully on the backend"); + logger.debug("Private gateway " + gateway + " was applied succesfully on the backend"); if (vo.getState() != VpcGateway.State.Ready) { vo.setState(VpcGateway.State.Ready); _vpcGatewayDao.update(vo.getId(), vo); - s_logger.debug("Marke gateway " + gateway + " with state " + VpcGateway.State.Ready); + logger.debug("Marke gateway " + gateway + " with state " + VpcGateway.State.Ready); } CallContext.current().setEventDetails("Private Gateway Id: " + gatewayId); return getVpcPrivateGateway(gatewayId); } else { - s_logger.warn("Private gateway " + gateway + " failed to apply on the backend"); + logger.warn("Private gateway " + gateway + " failed to apply on the backend"); return null; } } finally { //do cleanup if (!success) { if 
(destroyOnFailure) { - s_logger.debug("Destroying private gateway " + vo + " that failed to start"); + logger.debug("Destroying private gateway " + vo + " that failed to start"); // calling deleting from db because on createprivategateway fail, destroyPrivateGateway is already called if (deletePrivateGatewayFromTheDB(getVpcPrivateGateway(gatewayId))) { - s_logger.warn("Successfully destroyed vpc " + vo + " that failed to start"); + logger.warn("Successfully destroyed vpc " + vo + " that failed to start"); } else { - s_logger.warn("Failed to destroy vpc " + vo + " that failed to start"); + logger.warn("Failed to destroy vpc " + vo + " that failed to start"); } } } @@ -1769,7 +1767,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis gatewayVO.setState(VpcGateway.State.Deleting); _vpcGatewayDao.update(gatewayVO.getId(), gatewayVO); - s_logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Deleting); + logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Deleting); } }); @@ -1779,12 +1777,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis for (final VpcProvider provider : getVpcElements()) { if (providersToImplement.contains(provider.getProvider())) { if (provider.deletePrivateGateway(gateway)) { - s_logger.debug("Private gateway " + gateway + " was applied succesfully on the backend"); + logger.debug("Private gateway " + gateway + " was applied succesfully on the backend"); } else { - s_logger.warn("Private gateway " + gateway + " failed to apply on the backend"); + logger.warn("Private gateway " + gateway + " failed to apply on the backend"); gatewayVO.setState(VpcGateway.State.Ready); _vpcGatewayDao.update(gatewayVO.getId(), gatewayVO); - s_logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Ready); + logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Ready); return false; } @@ -1819,12 
+1817,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis final Account owner = _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner); _ntwkMgr.destroyNetwork(networkId, context, false); - s_logger.debug("Deleted private network id=" + networkId); + logger.debug("Deleted private network id=" + networkId); } } catch (final InterruptedException e) { - s_logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); + logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); } catch (final ExecutionException e) { - s_logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); + logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); } return true; @@ -1918,19 +1916,19 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis staticRouteProfiles.add(new StaticRouteProfile(route, gateway)); } if (!applyStaticRoutes(staticRouteProfiles)) { - s_logger.warn("Routes are not completely applied"); + logger.warn("Routes are not completely applied"); return false; } else { if (updateRoutesInDB) { for (final StaticRoute route : routes) { if (route.getState() == StaticRoute.State.Revoke) { _staticRouteDao.remove(route.getId()); - s_logger.debug("Removed route " + route + " from the DB"); + logger.debug("Removed route " + route + " from the DB"); } else if (route.getState() == StaticRoute.State.Add) { final StaticRouteVO ruleVO = _staticRouteDao.findById(route.getId()); ruleVO.setState(StaticRoute.State.Active); _staticRouteDao.update(ruleVO.getId(), ruleVO); - s_logger.debug("Marked route " + route + " with state " + StaticRoute.State.Active); + logger.debug("Marked route " + route + " with state " + StaticRoute.State.Active); } } } @@ -1941,12 
+1939,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis protected boolean applyStaticRoutes(final List routes) throws ResourceUnavailableException { if (routes.isEmpty()) { - s_logger.debug("No static routes to apply"); + logger.debug("No static routes to apply"); return true; } final Vpc vpc = _vpcDao.findById(routes.get(0).getVpcId()); - s_logger.debug("Applying static routes for vpc " + vpc); + logger.debug("Applying static routes for vpc " + vpc); final String staticNatProvider = _vpcSrvcDao.getProviderForServiceInVpc(vpc.getId(), Service.StaticNat); for (final VpcProvider provider : getVpcElements()) { @@ -1955,9 +1953,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } if (provider.applyStaticRoutes(vpc, routes)) { - s_logger.debug("Applied static routes for vpc " + vpc); + logger.debug("Applied static routes for vpc " + vpc); } else { - s_logger.warn("Failed to apply static routes for vpc " + vpc); + logger.warn("Failed to apply static routes for vpc " + vpc); return false; } } @@ -1986,7 +1984,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis protected boolean revokeStaticRoutesForVpc(final long vpcId, final Account caller) throws ResourceUnavailableException { //get all static routes for the vpc final List routes = _staticRouteDao.listByVpcId(vpcId); - s_logger.debug("Found " + routes.size() + " to revoke for the vpc " + vpcId); + logger.debug("Found " + routes.size() + " to revoke for the vpc " + vpcId); if (!routes.isEmpty()) { //mark all of them as revoke Transaction.execute(new TransactionCallbackNoReturn() { @@ -2049,7 +2047,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override public StaticRouteVO doInTransaction(final TransactionStatus status) throws NetworkRuleConflictException { StaticRouteVO newRoute = new StaticRouteVO(gateway.getId(), cidr, vpc.getId(), vpc.getAccountId(), vpc.getDomainId()); - 
s_logger.debug("Adding static route " + newRoute); + logger.debug("Adding static route " + newRoute); newRoute = _staticRouteDao.persist(newRoute); detectRoutesConflict(newRoute); @@ -2170,20 +2168,20 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } protected void markStaticRouteForRevoke(final StaticRouteVO route, final Account caller) { - s_logger.debug("Revoking static route " + route); + logger.debug("Revoking static route " + route); if (caller != null) { _accountMgr.checkAccess(caller, null, false, route); } if (route.getState() == StaticRoute.State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a static route that is still in stage state so just removing it: " + route); + if (logger.isDebugEnabled()) { + logger.debug("Found a static route that is still in stage state so just removing it: " + route); } _staticRouteDao.remove(route.getId()); } else if (route.getState() == StaticRoute.State.Add || route.getState() == StaticRoute.State.Active) { route.setState(StaticRoute.State.Revoke); _staticRouteDao.update(route.getId(), route); - s_logger.debug("Marked static route " + route + " with state " + StaticRoute.State.Revoke); + logger.debug("Marked static route " + route + " with state " + StaticRoute.State.Revoke); } } @@ -2193,12 +2191,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis try { final GlobalLock lock = GlobalLock.getInternLock("VpcCleanup"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } @@ -2206,19 +2204,19 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // Cleanup inactive VPCs final List inactiveVpcs = _vpcDao.listInactiveVpcs(); if (inactiveVpcs != null) { - s_logger.info("Found " + inactiveVpcs.size() + " removed VPCs to 
cleanup"); + logger.info("Found " + inactiveVpcs.size() + " removed VPCs to cleanup"); for (final VpcVO vpc : inactiveVpcs) { - s_logger.debug("Cleaning up " + vpc); + logger.debug("Cleaning up " + vpc); destroyVpc(vpc, _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM); } } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } finally { lock.unlock(); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } } } @@ -2236,7 +2234,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis _accountMgr.checkAccess(caller, null, true, ipToAssoc); owner = _accountMgr.getAccount(ipToAssoc.getAllocatedToAccountId()); } else { - s_logger.debug("Unable to find ip address by id: " + ipId); + logger.debug("Unable to find ip address by id: " + ipId); return null; } @@ -2253,7 +2251,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis isSourceNat = true; } - s_logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc); + logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc); final boolean isSourceNatFinal = isSourceNat; Transaction.execute(new TransactionCallbackNoReturn() { @@ -2271,7 +2269,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } }); - s_logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc); + logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc); return _ipAddressDao.findById(ipId); } @@ -2287,7 +2285,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return; } - s_logger.debug("Releasing VPC ip address " + ip + " from vpc network id=" + networkId); + logger.debug("Releasing VPC ip address " + ip + " from vpc network id=" + networkId); final long vpcId = ip.getVpcId(); boolean success = false; @@ -2301,11 +2299,11 @@ public class VpcManagerImpl extends ManagerBase implements 
VpcManager, VpcProvis if (success) { ip.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipId, ip); - s_logger.debug("IP address " + ip + " is no longer associated with the network inside vpc id=" + vpcId); + logger.debug("IP address " + ip + " is no longer associated with the network inside vpc id=" + vpcId); } else { throw new CloudRuntimeException("Failed to apply ip associations for network id=" + networkId + " as a part of unassigning ip " + ipId + " from vpc"); } - s_logger.debug("Successfully released VPC ip address " + ip + " back to VPC pool "); + logger.debug("Successfully released VPC ip address " + ip + " back to VPC pool "); } @Override diff --git a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 3c0188d6486..cdd0986e9bc 100644 --- a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -25,7 +25,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.command.user.vpn.ListRemoteAccessVpnsCmd; @@ -94,7 +93,6 @@ import com.cloud.utils.net.NetUtils; @Local(value = RemoteAccessVpnService.class) public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAccessVpnService, Configurable { - private final static Logger s_logger = Logger.getLogger(RemoteAccessVpnManagerImpl.class); static final ConfigKey RemoteAccessVpnClientIpRange = new ConfigKey("Network", String.class, RemoteAccessVpnClientIpRangeCK, "10.1.2.1-10.1.2.8", "The range of ips to be allocated to remote access vpn clients. 
The first ip in the range is used by the VPN server", false, ConfigKey.Scope.Account); @@ -260,7 +258,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc private void validateRemoteAccessVpnConfiguration() throws ConfigurationException { String ipRange = RemoteAccessVpnClientIpRange.value(); if (ipRange == null) { - s_logger.warn("Remote Access VPN global configuration missing client ip range -- ignoring"); + logger.warn("Remote Access VPN global configuration missing client ip range -- ignoring"); return; } Integer pskLength = _pskLength; @@ -286,7 +284,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc public boolean destroyRemoteAccessVpnForIp(long ipId, Account caller) throws ResourceUnavailableException { final RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByPublicIpAddress(ipId); if (vpn == null) { - s_logger.debug("there are no Remote access vpns for public ip address id=" + ipId); + logger.debug("there are no Remote access vpns for public ip address id=" + ipId); return true; } @@ -307,7 +305,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc }catch (ResourceUnavailableException ex) { vpn.setState(prevState); _remoteAccessVpnDao.update(vpn.getId(), vpn); - s_logger.debug("Failed to stop the vpn " + vpn.getId() + " , so reverted state to "+ + logger.debug("Failed to stop the vpn " + vpn.getId() + " , so reverted state to "+ RemoteAccessVpn.State.Running); success = false; } finally { @@ -332,12 +330,12 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc fwRules.add(_rulesDao.findByRelatedId(vpnFwRule.getId())); } - s_logger.debug("Marked " + fwRules.size() + " firewall rules as Revoked as a part of disable remote access vpn"); + logger.debug("Marked " + fwRules.size() + " firewall rules as Revoked as a part of disable remote access vpn"); } }); //now apply vpn rules on the backend - s_logger.debug("Reapplying firewall 
rules for ip id=" + ipId + " as a part of disable remote access vpn"); + logger.debug("Reapplying firewall rules for ip id=" + ipId + " as a part of disable remote access vpn"); success = _firewallMgr.applyIngressFirewallRules(ipId, caller); } @@ -359,14 +357,14 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc if (vpnFwRules != null) { for (FirewallRule vpnFwRule : vpnFwRules) { _rulesDao.remove(vpnFwRule.getId()); - s_logger.debug("Successfully removed firewall rule with ip id=" + vpnFwRule.getSourceIpAddressId() + " and port " + + logger.debug("Successfully removed firewall rule with ip id=" + vpnFwRule.getSourceIpAddressId() + " and port " + vpnFwRule.getSourcePortStart().intValue() + " as a part of vpn cleanup"); } } } }); } catch (Exception ex) { - s_logger.warn("Unable to release the three vpn ports from the firewall rules", ex); + logger.warn("Unable to release the three vpn ports from the firewall rules", ex); } } } @@ -508,7 +506,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc Account owner = _accountDao.findById(vpnOwnerId); _accountMgr.checkAccess(caller, null, true, owner); - s_logger.debug("Applying vpn users for " + owner); + logger.debug("Applying vpn users for " + owner); List vpns = _remoteAccessVpnDao.findByAccount(vpnOwnerId); List users = _vpnUsersDao.listByAccount(vpnOwnerId); @@ -525,13 +523,13 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc boolean[] finals = new boolean[users.size()]; for (RemoteAccessVPNServiceProvider element : _vpnServiceProviders) { - s_logger.debug("Applying vpn access to " + element.getName()); + logger.debug("Applying vpn access to " + element.getName()); for (RemoteAccessVpnVO vpn : vpns) { try { String[] results = element.applyVpnUsers(vpn, users); if (results != null) { for (int i = 0; i < results.length; i++) { - s_logger.debug("VPN User " + users.get(i) + (results[i] == null ? 
" is set on " : (" couldn't be set due to " + results[i]) + " on ") + vpn); + logger.debug("VPN User " + users.get(i) + (results[i] == null ? " is set on " : (" couldn't be set due to " + results[i]) + " on ") + vpn); if (results[i] == null) { if (!finals[i]) { finals[i] = true; @@ -543,7 +541,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc } } } catch (Exception e) { - s_logger.warn("Unable to apply vpn users ", e); + logger.warn("Unable to apply vpn users ", e); success = false; for (int i = 0; i < finals.length; i++) { @@ -573,7 +571,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc } }); } - s_logger.warn("Failed to apply vpn for user " + user.getUsername() + ", accountId=" + user.getAccountId()); + logger.warn("Failed to apply vpn for user " + user.getUsername() + ", accountId=" + user.getAccountId()); } } diff --git a/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java b/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java index bf4b8a36acf..f8d4fe734af 100644 --- a/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java @@ -24,7 +24,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.user.vpn.CreateVpnConnectionCmd; @@ -83,7 +82,6 @@ import com.cloud.vm.DomainRouterVO; @Component @Local(value = {Site2SiteVpnManager.class, Site2SiteVpnService.class}) public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpnManager { - private static final Logger s_logger = Logger.getLogger(Site2SiteVpnManagerImpl.class); List _s2sProviders; @Inject @@ -786,7 +784,7 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn startVpnConnection(conn.getId()); } catch (ResourceUnavailableException e) 
{ Site2SiteCustomerGatewayVO gw = _customerGatewayDao.findById(conn.getCustomerGatewayId()); - s_logger.warn("Site2SiteVpnManager: Fail to re-initiate VPN connection " + conn.getId() + " which connect to " + gw.getName()); + logger.warn("Site2SiteVpnManager: Fail to re-initiate VPN connection " + conn.getId() + " which connect to " + gw.getName()); } } } diff --git a/server/src/com/cloud/projects/ProjectManagerImpl.java b/server/src/com/cloud/projects/ProjectManagerImpl.java index 510b55ca1a2..ff80fd381cf 100644 --- a/server/src/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/com/cloud/projects/ProjectManagerImpl.java @@ -39,7 +39,6 @@ import javax.mail.URLName; import javax.mail.internet.InternetAddress; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.sun.mail.smtp.SMTPMessage; @@ -95,7 +94,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {ProjectService.class, ProjectManager.class}) public class ProjectManagerImpl extends ManagerBase implements ProjectManager { - public static final Logger s_logger = Logger.getLogger(ProjectManagerImpl.class); private EmailInvite _emailInvite; @Inject @@ -276,7 +274,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { boolean updateResult = Transaction.execute(new TransactionCallback() { @Override public Boolean doInTransaction(TransactionStatus status) { - s_logger.debug("Marking project id=" + project.getId() + " with state " + State.Disabled + " as a part of project delete..."); + logger.debug("Marking project id=" + project.getId() + " with state " + State.Disabled + " as a part of project delete..."); project.setState(State.Disabled); boolean updateResult = _projectDao.update(project.getId(), project); //owner can be already removed at this point, so adding the conditional check @@ -292,13 +290,13 @@ public class ProjectManagerImpl extends ManagerBase 
implements ProjectManager { if (updateResult) { //pass system caller when clenaup projects account if (!cleanupProject(project, _accountDao.findById(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM)) { - s_logger.warn("Failed to cleanup project's id=" + project.getId() + " resources, not removing the project yet"); + logger.warn("Failed to cleanup project's id=" + project.getId() + " resources, not removing the project yet"); return false; } else { return _projectDao.remove(project.getId()); } } else { - s_logger.warn("Failed to mark the project id=" + project.getId() + " with state " + State.Disabled); + logger.warn("Failed to mark the project id=" + project.getId() + " with state " + State.Disabled); return false; } } @@ -308,7 +306,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { boolean result = true; //Delete project's account AccountVO account = _accountDao.findById(project.getProjectAccountId()); - s_logger.debug("Deleting projects " + project + " internal account id=" + account.getId() + " as a part of project cleanup..."); + logger.debug("Deleting projects " + project + " internal account id=" + account.getId() + " as a part of project cleanup..."); result = result && _accountMgr.deleteAccount(account, callerUserId, caller); @@ -318,23 +316,23 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { @Override public Boolean doInTransaction(TransactionStatus status) { boolean result = true; - s_logger.debug("Unassigning all accounts from project " + project + " as a part of project cleanup..."); + logger.debug("Unassigning all accounts from project " + project + " as a part of project cleanup..."); List projectAccounts = _projectAccountDao.listByProjectId(project.getId()); for (ProjectAccount projectAccount : projectAccounts) { result = result && unassignAccountFromProject(projectAccount.getProjectId(), projectAccount.getAccountId()); } - s_logger.debug("Removing all invitations for the project " + 
project + " as a part of project cleanup..."); + logger.debug("Removing all invitations for the project " + project + " as a part of project cleanup..."); _projectInvitationDao.cleanupInvitations(project.getId()); return result; } }); if (result) { - s_logger.debug("Accounts are unassign successfully from project " + project + " as a part of project cleanup..."); + logger.debug("Accounts are unassign successfully from project " + project + " as a part of project cleanup..."); } } else { - s_logger.warn("Failed to cleanup project's internal account"); + logger.warn("Failed to cleanup project's internal account"); } return result; @@ -344,14 +342,14 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { public boolean unassignAccountFromProject(long projectId, long accountId) { ProjectAccountVO projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountId); if (projectAccount == null) { - s_logger.debug("Account id=" + accountId + " is not assigned to project id=" + projectId + " so no need to unassign"); + logger.debug("Account id=" + accountId + " is not assigned to project id=" + projectId + " so no need to unassign"); return true; } if (_projectAccountDao.remove(projectAccount.getId())) { return true; } else { - s_logger.warn("Failed to unassign account id=" + accountId + " from the project id=" + projectId); + logger.warn("Failed to unassign account id=" + accountId + " from the project id=" + projectId); return false; } } @@ -385,7 +383,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { //remove all invitations for account if (success) { - s_logger.debug("Removed account " + accountId + " from project " + projectId + " , cleaning up old invitations for account/project..."); + logger.debug("Removed account " + accountId + " from project " + projectId + " , cleaning up old invitations for account/project..."); ProjectInvitation invite = 
_projectInvitationDao.findByAccountIdProjectId(accountId, projectId); if (invite != null) { success = success && _projectInvitationDao.remove(invite.getId()); @@ -502,7 +500,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { _resourceLimitMgr.incrementResourceCount(futureOwnerAccount.getId(), ResourceType.project); } else { - s_logger.trace("Future owner " + newOwnerName + "is already the owner of the project id=" + projectId); + logger.trace("Future owner " + newOwnerName + "is already the owner of the project id=" + projectId); } } } @@ -555,7 +553,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { //Check if the account already added to the project ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, account.getId()); if (projectAccount != null) { - s_logger.debug("Account " + accountName + " already added to the project id=" + projectId); + logger.debug("Account " + accountName + " already added to the project id=" + projectId); return true; } } @@ -569,7 +567,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { if (assignAccountToProject(project, account.getId(), ProjectAccount.Role.Regular) != null) { return true; } else { - s_logger.warn("Failed to add account " + accountName + " to project id=" + projectId); + logger.warn("Failed to add account " + accountName + " to project id=" + projectId); return false; } } @@ -580,7 +578,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { if (createAccountInvitation(project, account.getId()) != null) { return true; } else { - s_logger.warn("Failed to generate invitation for account " + account.getAccountName() + " to project id=" + project); + logger.warn("Failed to generate invitation for account " + account.getAccountName() + " to project id=" + project); return false; } } @@ -591,7 +589,7 @@ public class ProjectManagerImpl extends ManagerBase 
implements ProjectManager { if (generateTokenBasedInvitation(project, email, token) != null) { return true; } else { - s_logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); + logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); return false; } } @@ -684,9 +682,9 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { } //remove the expired/declined invitation if (accountId != null) { - s_logger.debug("Removing invitation in state " + invite.getState() + " for account id=" + accountId + " to project " + project); + logger.debug("Removing invitation in state " + invite.getState() + " for account id=" + accountId + " to project " + project); } else if (email != null) { - s_logger.debug("Removing invitation in state " + invite.getState() + " for email " + email + " to project " + project); + logger.debug("Removing invitation in state " + invite.getState() + " for email " + email + " to project " + project); } _projectInvitationDao.expunge(invite.getId()); @@ -708,7 +706,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { try { _emailInvite.sendInvite(token, email, project.getId()); } catch (Exception ex) { - s_logger.warn("Failed to send project id=" + project + " invitation to the email " + email + "; removing the invitation record from the db", ex); + logger.warn("Failed to send project id=" + project + " invitation to the email " + email + "; removing the invitation record from the db", ex); _projectInvitationDao.remove(projectInvitation.getId()); return null; } @@ -717,7 +715,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { } private boolean expireInvitation(ProjectInvitationVO invite) { - s_logger.debug("Expiring invitation id=" + invite.getId()); + logger.debug("Expiring invitation id=" + invite.getId()); invite.setState(ProjectInvitation.State.Expired); return 
_projectInvitationDao.update(invite.getId(), invite); } @@ -782,7 +780,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { ProjectInvitation.State newState = accept ? ProjectInvitation.State.Completed : ProjectInvitation.State.Declined; //update invitation - s_logger.debug("Marking invitation " + inviteFinal + " with state " + newState); + logger.debug("Marking invitation " + inviteFinal + " with state " + newState); inviteFinal.setState(newState); result = _projectInvitationDao.update(inviteFinal.getId(), inviteFinal); @@ -790,12 +788,12 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { //check if account already exists for the project (was added before invitation got accepted) ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountIdFinal); if (projectAccount != null) { - s_logger.debug("Account " + accountNameFinal + " already added to the project id=" + projectId); + logger.debug("Account " + accountNameFinal + " already added to the project id=" + projectId); } else { assignAccountToProject(project, accountIdFinal, ProjectAccount.Role.Regular); } } else { - s_logger.warn("Failed to update project invitation " + inviteFinal + " with state " + newState); + logger.warn("Failed to update project invitation " + inviteFinal + " with state " + newState); } return result; @@ -836,7 +834,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { Project.State currentState = project.getState(); if (currentState == State.Active) { - s_logger.debug("The project id=" + projectId + " is already active, no need to activate it again"); + logger.debug("The project id=" + projectId + " is already active, no need to activate it again"); return project; } @@ -873,7 +871,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { _accountMgr.checkAccess(caller, AccessType.ModifyProject, true, 
_accountMgr.getAccount(project.getProjectAccountId())); if (suspendProject(project)) { - s_logger.debug("Successfully suspended project id=" + projectId); + logger.debug("Successfully suspended project id=" + projectId); return _projectDao.findById(projectId); } else { CloudRuntimeException ex = new CloudRuntimeException("Failed to suspend project with specified id"); @@ -885,14 +883,14 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { private boolean suspendProject(ProjectVO project) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Marking project " + project + " with state " + State.Suspended + " as a part of project suspend..."); + logger.debug("Marking project " + project + " with state " + State.Suspended + " as a part of project suspend..."); project.setState(State.Suspended); boolean updateResult = _projectDao.update(project.getId(), project); if (updateResult) { long projectAccountId = project.getProjectAccountId(); if (!_accountMgr.disableAccount(projectAccountId)) { - s_logger.warn("Failed to suspend all project's " + project + " resources; the resources will be suspended later by background thread"); + logger.warn("Failed to suspend all project's " + project + " resources; the resources will be suspended later by background thread"); } } else { throw new CloudRuntimeException("Failed to mark the project " + project + " with state " + State.Suspended); @@ -967,7 +965,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { try { address = new InternetAddress(email, email); } catch (Exception ex) { - s_logger.error("Exception creating address for: " + email, ex); + logger.error("Exception creating address for: " + email, ex); } } @@ -1015,10 +1013,10 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { _accountMgr.checkAccess(caller, AccessType.ModifyProject, true, _accountMgr.getAccount(project.getProjectAccountId())); if 
(_projectInvitationDao.remove(id)) { - s_logger.debug("Project Invitation id=" + id + " is removed"); + logger.debug("Project Invitation id=" + id + " is removed"); return true; } else { - s_logger.debug("Failed to remove project invitation id=" + id); + logger.debug("Failed to remove project invitation id=" + id); return false; } } @@ -1030,15 +1028,15 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { TimeZone.getDefault(); List invitationsToExpire = _projectInvitationDao.listInvitationsToExpire(_invitationTimeOut); if (!invitationsToExpire.isEmpty()) { - s_logger.debug("Found " + invitationsToExpire.size() + " projects to expire"); + logger.debug("Found " + invitationsToExpire.size() + " projects to expire"); for (ProjectInvitationVO invitationToExpire : invitationsToExpire) { invitationToExpire.setState(ProjectInvitation.State.Expired); _projectInvitationDao.update(invitationToExpire.getId(), invitationToExpire); - s_logger.trace("Expired project invitation id=" + invitationToExpire.getId()); + logger.trace("Expired project invitation id=" + invitationToExpire.getId()); } } } catch (Exception ex) { - s_logger.warn("Exception while running expired invitations cleanup", ex); + logger.warn("Exception while running expired invitations cleanup", ex); } } } diff --git a/server/src/com/cloud/resource/DiscovererBase.java b/server/src/com/cloud/resource/DiscovererBase.java index ad32b9fda56..0fdac13897a 100644 --- a/server/src/com/cloud/resource/DiscovererBase.java +++ b/server/src/com/cloud/resource/DiscovererBase.java @@ -26,7 +26,6 @@ import com.cloud.network.NetworkModel; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.net.UrlUtil; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -38,7 +37,6 @@ import java.util.Map; public abstract class DiscovererBase extends AdapterBase implements 
Discoverer { protected Map _params; - private static final Logger s_logger = Logger.getLogger(DiscovererBase.class); @Inject protected ClusterDao _clusterDao; @Inject @@ -90,19 +88,19 @@ public abstract class DiscovererBase extends AdapterBase implements Discoverer { Constructor constructor = clazz.getConstructor(); resource = (ServerResource)constructor.newInstance(); } catch (ClassNotFoundException e) { - s_logger.warn("Unable to find class " + resourceName, e); + logger.warn("Unable to find class " + resourceName, e); } catch (InstantiationException e) { - s_logger.warn("Unablet to instantiate class " + resourceName, e); + logger.warn("Unablet to instantiate class " + resourceName, e); } catch (IllegalAccessException e) { - s_logger.warn("Illegal access " + resourceName, e); + logger.warn("Illegal access " + resourceName, e); } catch (SecurityException e) { - s_logger.warn("Security error on " + resourceName, e); + logger.warn("Security error on " + resourceName, e); } catch (NoSuchMethodException e) { - s_logger.warn("NoSuchMethodException error on " + resourceName, e); + logger.warn("NoSuchMethodException error on " + resourceName, e); } catch (IllegalArgumentException e) { - s_logger.warn("IllegalArgumentException error on " + resourceName, e); + logger.warn("IllegalArgumentException error on " + resourceName, e); } catch (InvocationTargetException e) { - s_logger.warn("InvocationTargetException error on " + resourceName, e); + logger.warn("InvocationTargetException error on " + resourceName, e); } return resource; @@ -157,11 +155,11 @@ public abstract class DiscovererBase extends AdapterBase implements Discoverer { try { resource.configure(host.getName(), params); } catch (ConfigurationException e) { - s_logger.warn("Unable to configure resource due to " + e.getMessage()); + logger.warn("Unable to configure resource due to " + e.getMessage()); return null; } if (!resource.start()) { - s_logger.warn("Unable to start the resource"); + logger.warn("Unable to 
start the resource"); return null; } } diff --git a/server/src/com/cloud/resource/DummyHostDiscoverer.java b/server/src/com/cloud/resource/DummyHostDiscoverer.java index 4651498d997..ec026f2d539 100644 --- a/server/src/com/cloud/resource/DummyHostDiscoverer.java +++ b/server/src/com/cloud/resource/DummyHostDiscoverer.java @@ -25,7 +25,6 @@ import java.util.UUID; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.HostVO; @@ -35,7 +34,6 @@ import com.cloud.utils.component.AdapterBase; @Component @Local(value = Discoverer.class) public class DummyHostDiscoverer extends AdapterBase implements Discoverer { - private static final Logger s_logger = Logger.getLogger(DummyHostDiscoverer.class); @Override public Map> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags) { @@ -62,7 +60,7 @@ public class DummyHostDiscoverer extends AdapterBase implements Discoverer { try { resource.configure("Dummy Host Server", params); } catch (ConfigurationException e) { - s_logger.warn("Unable to instantiate dummy host server resource"); + logger.warn("Unable to instantiate dummy host server resource"); } resource.start(); resources.put(resource, details); diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index b450e31f83a..125c3a2ab97 100644 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -46,7 +46,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.lang.ObjectUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -174,7 
+173,6 @@ import com.google.gson.Gson; @Component @Local({ResourceManager.class, ResourceService.class}) public class ResourceManagerImpl extends ManagerBase implements ResourceManager, ResourceService, Manager { - private static final Logger s_logger = Logger.getLogger(ResourceManagerImpl.class); Gson _gson; @@ -356,7 +354,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } else { throw new CloudRuntimeException("Unknown resource event:" + event); } - s_logger.debug("Sent resource event " + eventName + " to listener " + l.getClass().getSimpleName()); + logger.debug("Sent resource event " + eventName + " to listener " + l.getClass().getSimpleName()); } } @@ -420,7 +418,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(cmd.getHypervisor()); if (hypervisorType == null) { - s_logger.error("Unable to resolve " + cmd.getHypervisor() + " to a valid supported hypervisor type"); + logger.error("Unable to resolve " + cmd.getHypervisor() + " to a valid supported hypervisor type"); throw new InvalidParameterValueException("Unable to resolve " + cmd.getHypervisor() + " to a supported "); } @@ -534,12 +532,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } discoverer.postDiscovery(hosts, _nodeId); } - s_logger.info("External cluster has been successfully discovered by " + discoverer.getName()); + logger.info("External cluster has been successfully discovered by " + discoverer.getName()); success = true; return result; } - s_logger.warn("Unable to find the server resources at " + url); + logger.warn("Unable to find the server resources at " + url); throw new DiscoveryException("Unable to add the external cluster"); } finally { if (!success) { @@ -730,7 +728,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } final List hosts = new ArrayList(); - 
s_logger.info("Trying to add a new host at " + url + " in data center " + dcId); + logger.info("Trying to add a new host at " + url + " in data center " + dcId); boolean isHypervisorTypeSupported = false; for (final Discoverer discoverer : _discoverers) { if (params != null) { @@ -749,7 +747,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } catch (final DiscoveryException e) { throw e; } catch (final Exception e) { - s_logger.info("Exception in host discovery process with discoverer: " + discoverer.getName() + ", skip to another discoverer if there is any"); + logger.info("Exception in host discovery process with discoverer: " + discoverer.getName() + ", skip to another discoverer if there is any"); } processResourceEvent(ResourceListener.EVENT_DISCOVER_AFTER, resources); @@ -767,8 +765,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, for (final HostVO host : kvmHosts) { if (host.getGuid().equalsIgnoreCase(guid)) { if (hostTags != null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Adding Host Tags for KVM host, tags: :" + hostTags); + if (logger.isTraceEnabled()) { + logger.trace("Adding Host Tags for KVM host, tags: :" + hostTags); } _hostTagsDao.persist(host.getId(), hostTags); } @@ -791,16 +789,16 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, discoverer.postDiscovery(hosts, _nodeId); } - s_logger.info("server resources successfully discovered by " + discoverer.getName()); + logger.info("server resources successfully discovered by " + discoverer.getName()); return hosts; } } if (!isHypervisorTypeSupported) { final String msg = "Do not support HypervisorType " + hypervisorType + " for " + url; - s_logger.warn(msg); + logger.warn(msg); throw new DiscoveryException(msg); } - s_logger.warn("Unable to find the server resources at " + url); + logger.warn("Unable to find the server resources at " + url); throw new DiscoveryException("Unable to add 
the host"); } @@ -879,7 +877,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, try { resourceStateTransitTo(host, ResourceState.Event.DeleteHost, _nodeId); } catch (final NoTransitionException e) { - s_logger.debug("Cannot transmit host " + host.getId() + " to Enabled state", e); + logger.debug("Cannot transmit host " + host.getId() + " to Enabled state", e); } // Delete the associated entries in host ref table @@ -904,7 +902,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, storagePool.setClusterId(null); _storagePoolDao.update(poolId, storagePool); _storagePoolDao.remove(poolId); - s_logger.debug("Local storage id=" + poolId + " is removed as a part of host removal id=" + hostId); + logger.debug("Local storage id=" + poolId + " is removed as a part of host removal id=" + hostId); } } @@ -948,8 +946,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, public void doInTransactionWithoutResult(final TransactionStatus status) { final ClusterVO cluster = _clusterDao.lockRow(cmd.getId(), true); if (cluster == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster: " + cmd.getId() + " does not even exist. Delete call is ignored."); + if (logger.isDebugEnabled()) { + logger.debug("Cluster: " + cmd.getId() + " does not even exist. Delete call is ignored."); } throw new CloudRuntimeException("Cluster: " + cmd.getId() + " does not exist"); } @@ -958,8 +956,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final List hosts = listAllHostsInCluster(cmd.getId()); if (hosts.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove"); + if (logger.isDebugEnabled()) { + logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove"); } throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. 
Cluster still has hosts"); } @@ -968,8 +966,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, // pools final List storagePools = _storagePoolDao.listPoolsByCluster(cmd.getId()); if (storagePools.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster: " + cmd.getId() + " still has storage pools, can't remove"); + if (logger.isDebugEnabled()) { + logger.debug("Cluster: " + cmd.getId() + " still has storage pools, can't remove"); } throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has storage pools"); } @@ -995,7 +993,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } catch (final CloudRuntimeException e) { throw e; } catch (final Throwable t) { - s_logger.error("Unable to delete cluster: " + cmd.getId(), t); + logger.error("Unable to delete cluster: " + cmd.getId(), t); return false; } } @@ -1011,7 +1009,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (hypervisor != null && !hypervisor.isEmpty()) { final Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(hypervisor); if (hypervisorType == null) { - s_logger.error("Unable to resolve " + hypervisor + " to a valid supported hypervisor type"); + logger.error("Unable to resolve " + hypervisor + " to a valid supported hypervisor type"); throw new InvalidParameterValueException("Unable to resolve " + hypervisor + " to a supported type"); } else { cluster.setHypervisorType(hypervisor); @@ -1027,7 +1025,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type"); } if (newClusterType == null) { - s_logger.error("Unable to resolve " + clusterType + " to a valid supported cluster type"); + logger.error("Unable to resolve " + clusterType + " to a valid supported cluster type"); throw new 
InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type"); } else { cluster.setClusterType(newClusterType); @@ -1043,7 +1041,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new InvalidParameterValueException("Unable to resolve Allocation State '" + allocationState + "' to a supported state"); } if (newAllocationState == null) { - s_logger.error("Unable to resolve " + allocationState + " to a valid supported allocation State"); + logger.error("Unable to resolve " + allocationState + " to a valid supported allocation State"); throw new InvalidParameterValueException("Unable to resolve " + allocationState + " to a supported state"); } else { cluster.setAllocationState(newAllocationState); @@ -1060,7 +1058,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new InvalidParameterValueException("Unable to resolve Managed State '" + managedstate + "' to a supported state"); } if (newManagedState == null) { - s_logger.error("Unable to resolve Managed State '" + managedstate + "' to a supported state"); + logger.error("Unable to resolve Managed State '" + managedstate + "' to a supported state"); throw new InvalidParameterValueException("Unable to resolve Managed State '" + managedstate + "' to a supported state"); } else { doUpdate = true; @@ -1179,7 +1177,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final HostVO host = _hostDao.findById(hostId); final MaintainAnswer answer = (MaintainAnswer)_agentMgr.easySend(hostId, new MaintainCommand()); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to send MaintainCommand to host: " + hostId); + logger.warn("Unable to send MaintainCommand to host: " + hostId); return false; } @@ -1187,7 +1185,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, resourceStateTransitTo(host, ResourceState.Event.AdminAskMaintenace, _nodeId); } 
catch (final NoTransitionException e) { final String err = "Cannot transmit resource state of host " + host.getId() + " to " + ResourceState.Maintenance; - s_logger.debug(err, e); + logger.debug(err, e); throw new CloudRuntimeException(err + e.getMessage()); } @@ -1236,7 +1234,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.debug("Unable to find host " + hostId); + logger.debug("Unable to find host " + hostId); throw new InvalidParameterValueException("Unable to find host with ID: " + hostId + ". Please specify a valid host ID."); } @@ -1277,7 +1275,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } } catch (final NoTransitionException e) { - s_logger.debug("Cannot transmit host " + host.getId() + "to Maintenance state", e); + logger.debug("Cannot transmit host " + host.getId() + "to Maintenance state", e); } return hostInMaintenance; } @@ -1331,8 +1329,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final List hostTags = cmd.getHostTags(); if (hostTags != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating Host Tags to :" + hostTags); + if (logger.isDebugEnabled()) { + logger.debug("Updating Host Tags to :" + hostTags); } _hostTagsDao.persist(hostId, hostTags); } @@ -1480,7 +1478,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final ResourceStateAdapter adapter = item.getValue(); final String msg = "Dispatching resource state event " + event + " to " + item.getKey(); - s_logger.debug(msg); + logger.debug(msg); if (event == ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED) { result = adapter.createHostVOForConnectedAgent((HostVO)args[0], (StartupCommand[])args[1]); @@ -1501,7 +1499,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, break; } } catch (final 
UnableDeleteHostException e) { - s_logger.debug("Adapter " + adapter.getName() + " says unable to delete host", e); + logger.debug("Adapter " + adapter.getName() + " says unable to delete host", e); result = new ResourceStateAdapter.DeleteHostAnswer(false, true); } } else { @@ -1527,7 +1525,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final String cidrSubnet = NetUtils.getCidrSubNet(cidrAddress, cidrSize); final String serverSubnet = NetUtils.getSubNet(serverPrivateIP, serverPrivateNetmask); if (!cidrSubnet.equals(serverSubnet)) { - s_logger.warn("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() + " and zone: " + + logger.warn("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() + " and zone: " + dc.getName()); throw new IllegalArgumentException("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() + " and zone: " + dc.getName()); @@ -1607,7 +1605,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, dcId = Long.parseLong(dataCenter); dc = _dcDao.findById(dcId); } catch (final NumberFormatException e) { - s_logger.debug("Cannot parse " + dataCenter + " into Long."); + logger.debug("Cannot parse " + dataCenter + " into Long."); } } if (dc == null) { @@ -1621,7 +1619,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final long podId = Long.parseLong(pod); p = _podDao.findById(podId); } catch (final NumberFormatException e) { - s_logger.debug("Cannot parse " + pod + " into Long."); + logger.debug("Cannot parse " + pod + " into Long."); } } /* @@ -1708,12 +1706,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, /* Agent goes to Connecting status */ _agentMgr.agentStatusTransitTo(host, Status.Event.AgentConnected, 
_nodeId); } catch (final Exception e) { - s_logger.debug("Cannot transmit host " + host.getId() + " to Creating state", e); + logger.debug("Cannot transmit host " + host.getId() + " to Creating state", e); _agentMgr.agentStatusTransitTo(host, Status.Event.Error, _nodeId); try { resourceStateTransitTo(host, ResourceState.Event.Error, _nodeId); } catch (final NoTransitionException e1) { - s_logger.debug("Cannot transmit host " + host.getId() + "to Error state", e); + logger.debug("Cannot transmit host " + host.getId() + "to Error state", e); } } @@ -1766,7 +1764,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, try { cmds = resource.initialize(); if (cmds == null) { - s_logger.info("Unable to fully initialize the agent because no StartupCommands are returned"); + logger.info("Unable to fully initialize the agent because no StartupCommands are returned"); return null; } @@ -1779,7 +1777,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { new Request(-1l, -1l, cmds, true, false).logD("Startup request from directly connected host: ", true); } @@ -1790,7 +1788,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, host = findHostByGuid(firstCmd.getGuidWithoutResource()); } if (host != null && host.getRemoved() == null) { // host already added, no need to add again - s_logger.debug("Found the host " + host.getId() + " by guid: " + firstCmd.getGuid() + ", old host reconnected as new"); + logger.debug("Found the host " + host.getId() + " by guid: " + firstCmd.getGuid() + ", old host reconnected as new"); hostExists = true; // ensures that host status is left unchanged in case of adding same one again return null; } @@ -1803,7 +1801,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, host = _hostDao.findById(host.getId()); } } catch (final Exception e) { - 
s_logger.warn("Unable to connect due to ", e); + logger.warn("Unable to connect due to ", e); } finally { if (hostExists) { if (cmds != null) { @@ -1832,7 +1830,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, try { cmds = resource.initialize(); if (cmds == null) { - s_logger.info("Unable to fully initialize the agent because no StartupCommands are returned"); + logger.info("Unable to fully initialize the agent because no StartupCommands are returned"); return null; } @@ -1845,7 +1843,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { new Request(-1l, -1l, cmds, true, false).logD("Startup request from directly connected host: ", true); } @@ -1859,7 +1857,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, // added, no // need to add // again - s_logger.debug("Found the host " + host.getId() + " by guid: " + firstCmd.getGuid() + ", old host reconnected as new"); + logger.debug("Found the host " + host.getId() + " by guid: " + firstCmd.getGuid() + ", old host reconnected as new"); hostExists = true; // ensures that host status is left // unchanged in case of adding same one // again @@ -1903,7 +1901,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } } catch (final Exception e) { - s_logger.warn("Unable to connect due to ", e); + logger.warn("Unable to connect due to ", e); } finally { if (hostExists) { if (cmds != null) { @@ -1990,7 +1988,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public HostVO fillRoutingHostVO(final HostVO host, final StartupRoutingCommand ssCmd, final HypervisorType hyType, Map details, final List hostTags) { if (host.getPodId() == null) { - s_logger.error("Host " + ssCmd.getPrivateIpAddress() + " sent incorrect pod, pod id is null"); + logger.error("Host " + 
ssCmd.getPrivateIpAddress() + " sent incorrect pod, pod id is null"); throw new IllegalArgumentException("Host " + ssCmd.getPrivateIpAddress() + " sent incorrect pod, pod id is null"); } @@ -2031,8 +2029,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new CloudRuntimeException("Non-Routing host gets in deleteRoutingHost, id is " + host.getId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting Host: " + host.getId() + " Guid:" + host.getGuid()); + if (logger.isDebugEnabled()) { + logger.debug("Deleting Host: " + host.getId() + " Guid:" + host.getGuid()); } if (forceDestroyStorage) { @@ -2044,12 +2042,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, try { final StoragePool pool = _storageSvr.preparePrimaryStorageForMaintenance(storagePool.getId()); if (pool == null) { - s_logger.debug("Failed to set primary storage into maintenance mode"); + logger.debug("Failed to set primary storage into maintenance mode"); throw new UnableDeleteHostException("Failed to set primary storage into maintenance mode"); } } catch (final Exception e) { - s_logger.debug("Failed to set primary storage into maintenance mode, due to: " + e.toString()); + logger.debug("Failed to set primary storage into maintenance mode, due to: " + e.toString()); throw new UnableDeleteHostException("Failed to set primary storage into maintenance mode, due to: " + e.toString()); } } @@ -2060,7 +2058,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, _vmMgr.destroy(vm.getUuid()); } catch (final Exception e) { final String errorMsg = "There was an error Destory the vm: " + vm + " as a part of hostDelete id=" + host.getId(); - s_logger.debug(errorMsg, e); + logger.debug(errorMsg, e); throw new UnableDeleteHostException(errorMsg + "," + e.getMessage()); } } @@ -2074,16 +2072,16 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, // Restart HA enabled 
vms for (final VMInstanceVO vm : vms) { if (!vm.isHaEnabled() || vm.getState() == State.Stopping) { - s_logger.debug("Stopping vm: " + vm + " as a part of deleteHost id=" + host.getId()); + logger.debug("Stopping vm: " + vm + " as a part of deleteHost id=" + host.getId()); try { _vmMgr.advanceStop(vm.getUuid(), false); } catch (final Exception e) { final String errorMsg = "There was an error stopping the vm: " + vm + " as a part of hostDelete id=" + host.getId(); - s_logger.debug(errorMsg, e); + logger.debug(errorMsg, e); throw new UnableDeleteHostException(errorMsg + "," + e.getMessage()); } } else if (vm.isHaEnabled() && (vm.getState() == State.Running || vm.getState() == State.Starting)) { - s_logger.debug("Scheduling restart for vm: " + vm + " " + vm.getState() + " on the host id=" + host.getId()); + logger.debug("Scheduling restart for vm: " + vm + " " + vm.getState() + " on the host id=" + host.getId()); _haMgr.scheduleRestart(vm, false); } } @@ -2099,7 +2097,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, HostVO host; host = _hostDao.findById(hostId); if (host == null || host.getRemoved() != null) { - s_logger.warn("Unable to find host " + hostId); + logger.warn("Unable to find host " + hostId); return true; } @@ -2117,7 +2115,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final List vms = _haMgr.findTakenMigrationWork(); for (final VMInstanceVO vm : vms) { if (vm != null && vm.getHostId() != null && vm.getHostId() == hostId) { - s_logger.info("Unable to cancel migration because the vm is being migrated: " + vm); + logger.info("Unable to cancel migration because the vm is being migrated: " + vm); return false; } } @@ -2131,7 +2129,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final boolean sshToAgent = Boolean.parseBoolean(_configDao.getValue(Config.KvmSshToAgentEnabled.key())); if (!sshToAgent) { - s_logger.info("Configuration tells us not to 
SSH into Agents. Please restart the Agent (" + hostId + ") manually"); + logger.info("Configuration tells us not to SSH into Agents. Please restart the Agent (" + hostId + ") manually"); return true; } @@ -2139,12 +2137,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final String password = host.getDetail("password"); final String username = host.getDetail("username"); if (password == null || username == null) { - s_logger.debug("Can't find password/username"); + logger.debug("Can't find password/username"); return false; } final com.trilead.ssh2.Connection connection = SSHCmdHelper.acquireAuthorizedConnection(host.getPrivateIpAddress(), 22, username, password); if (connection == null) { - s_logger.debug("Failed to connect to host: " + host.getPrivateIpAddress()); + logger.debug("Failed to connect to host: " + host.getPrivateIpAddress()); return false; } @@ -2157,7 +2155,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return true; } catch (final NoTransitionException e) { - s_logger.debug("Cannot transmit host " + host.getId() + "to Enabled state", e); + logger.debug("Cannot transmit host " + host.getId() + "to Enabled state", e); return false; } } @@ -2197,7 +2195,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, private boolean doUmanageHost(final long hostId) { final HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.debug("Cannot find host " + hostId + ", assuming it has been deleted, skip umanage"); + logger.debug("Cannot find host " + hostId + ", assuming it has been deleted, skip umanage"); return true; } @@ -2241,7 +2239,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final UpdateHostPasswordCommand cmd = new UpdateHostPasswordCommand(username, password, hostIpAddress); final Answer answer = _agentMgr.easySend(hostId, cmd); - s_logger.info("Result returned from update host password ==> " 
+ answer.getDetails()); + logger.info("Result returned from update host password ==> " + answer.getDetails()); return answer.getResult(); } @@ -2261,7 +2259,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return result; } } catch (final AgentUnavailableException e) { - s_logger.error("Agent is not availbale!", e); + logger.error("Agent is not availbale!", e); } if (shouldUpdateHostPasswd) { @@ -2284,7 +2282,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return result; } } catch (final AgentUnavailableException e) { - s_logger.error("Agent is not availbale!", e); + logger.error("Agent is not availbale!", e); } final boolean shouldUpdateHostPasswd = command.getUpdatePasswdOnHost(); @@ -2311,8 +2309,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); + if (logger.isDebugEnabled()) { + logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); } final Command[] cmds = new Command[1]; cmds[0] = new PropagateResourceEventCommand(agentId, event); @@ -2324,8 +2322,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final Answer[] answers = _gson.fromJson(AnsStr, Answer[].class); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Result for agent change is " + answers[0].getResult()); + if (logger.isDebugEnabled()) { + logger.debug("Result for agent change is " + answers[0].getResult()); } return answers[0].getResult(); @@ -2335,15 +2333,15 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, public boolean maintenanceFailed(final long hostId) { final HostVO host = _hostDao.findById(hostId); if (host == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cant not find host " + hostId); + if 
(logger.isDebugEnabled()) { + logger.debug("Cant not find host " + hostId); } return false; } else { try { return resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId); } catch (final NoTransitionException e) { - s_logger.debug("No next resource state for host " + host.getId() + " while current state is " + host.getResourceState() + " with event " + + logger.debug("No next resource state for host " + host.getId() + " while current state is " + host.getResourceState() + " with event " + ResourceState.Event.UnableToMigrate, e); return false; } @@ -2490,7 +2488,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (answer == null || !answer.getResult()) { final String msg = "Unable to obtain host " + hostId + " statistics. "; - s_logger.warn(msg); + logger.warn(msg); return null; } else { @@ -2589,8 +2587,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if(!listAvailableGPUDevice(hostId, groupName, vgpuType).isEmpty()) { return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host ID: "+ hostId +" does not have GPU device available"); + if (logger.isDebugEnabled()) { + logger.debug("Host ID: "+ hostId +" does not have GPU device available"); } return false; } @@ -2620,7 +2618,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } if (answer == null || !answer.getResult()) { final String msg = "Unable to obtain GPU stats for host " + host.getName(); - s_logger.warn(msg); + logger.warn(msg); return null; } else { // now construct the result object @@ -2644,8 +2642,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final long id = reservationEntry.getId(); final PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true); if (hostReservation == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host reservation for host: " + hostId + " does not even exist. 
Release reservartion call is ignored."); + if (logger.isDebugEnabled()) { + logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); } return false; } @@ -2654,8 +2652,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); + if (logger.isDebugEnabled()) { + logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); } return false; @@ -2664,7 +2662,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } catch (final CloudRuntimeException e) { throw e; } catch (final Throwable t) { - s_logger.error("Unable to release host reservation for host: " + hostId, t); + logger.error("Unable to release host reservation for host: " + hostId, t); return false; } } diff --git a/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index 1651ad7ba28..4a048a24789 100644 --- a/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -30,7 +30,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -110,7 +109,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Component @Local(value = {ResourceLimitService.class}) public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLimitService { - public static final Logger s_logger = Logger.getLogger(ResourceLimitManagerImpl.class); @Inject private DomainDao _domainDao; @@ -249,7 +247,7 @@ public class ResourceLimitManagerImpl extends 
ManagerBase implements ResourceLim domainResourceLimitMap.put(Resource.ResourceType.primary_storage, Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainPrimaryStorage.key()))); domainResourceLimitMap.put(Resource.ResourceType.secondary_storage, Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainSecondaryStorage.key()))); } catch (NumberFormatException e) { - s_logger.error("NumberFormatException during configuration", e); + logger.error("NumberFormatException during configuration", e); throw new ConfigurationException("Configuration failed due to NumberFormatException, see log for the stacktrace"); } @@ -260,7 +258,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim public void incrementResourceCount(long accountId, ResourceType type, Long... delta) { // don't upgrade resource count for system account if (accountId == Account.ACCOUNT_ID_SYSTEM) { - s_logger.trace("Not incrementing resource count for system accounts, returning"); + logger.trace("Not incrementing resource count for system accounts, returning"); return; } @@ -276,7 +274,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim public void decrementResourceCount(long accountId, ResourceType type, Long... delta) { // don't upgrade resource count for system account if (accountId == Account.ACCOUNT_ID_SYSTEM) { - s_logger.trace("Not decrementing resource count for system accounts, returning"); + logger.trace("Not decrementing resource count for system accounts, returning"); return; } long numToDecrement = (delta.length == 0) ? 
1 : delta[0].longValue(); @@ -450,7 +448,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim " has been exceeded."; } ResourceAllocationException e= new ResourceAllocationException(message, type);; - s_logger.error(message, e); + logger.error(message, e); throw e; } @@ -795,7 +793,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim for (ResourceCountVO rowToUpdate : rowsToUpdate) { if (!_resourceCountDao.updateById(rowToUpdate.getId(), increment, delta)) { - s_logger.trace("Unable to update resource count for the row " + rowToUpdate); + logger.trace("Unable to update resource count for the row " + rowToUpdate); result = false; } } @@ -804,7 +802,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim } }); } catch (Exception ex) { - s_logger.error("Failed to update resource count for account id=" + accountId); + logger.error("Failed to update resource count for account id=" + accountId); return false; } } @@ -850,7 +848,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim _resourceCountDao.setResourceCount(domainId, ResourceOwnerType.Domain, type, newCount); if (oldCount != newCount) { - s_logger.info("Discrepency in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type + + logger.info("Discrepency in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type + " for domain ID " + domainId + " is fixed during resource count recalculation."); } @@ -912,7 +910,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim // No need to log message for primary and secondary storage because both are recalculating the resource count which will not lead to any discrepancy. 
if (!Long.valueOf(oldCount).equals(newCount) && (type != Resource.ResourceType.primary_storage && type != Resource.ResourceType.secondary_storage)) { - s_logger.info("Discrepency in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type + + logger.info("Discrepency in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type + " for account ID " + accountId + " is fixed during resource count recalculation."); } @@ -1070,7 +1068,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim @Override protected void runInContext() { - s_logger.info("Running resource count check periodic task"); + logger.info("Running resource count check periodic task"); List domains = _domainDao.findImmediateChildrenForParent(Domain.ROOT_DOMAIN); // recalculateDomainResourceCount will take care of re-calculation of resource counts for sub-domains diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index 5ff548c7f3e..6cf5516a189 100644 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -46,7 +46,6 @@ import javax.naming.ConfigurationException; import org.apache.commons.codec.binary.Base64; import org.apache.commons.io.FileUtils; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.framework.config.ConfigDepot; @@ -121,7 +120,6 @@ import com.cloud.utils.nio.Link; import com.cloud.utils.script.Script; public class ConfigurationServerImpl extends ManagerBase implements ConfigurationServer { - public static final Logger s_logger = Logger.getLogger(ConfigurationServerImpl.class); @Inject private ConfigurationDao _configDao; @@ -182,7 +180,7 @@ public class ConfigurationServerImpl extends ManagerBase 
implements Configuratio String init = _configDao.getValue("init"); if (init == null || init.equals("false")) { - s_logger.debug("ConfigurationServer is saving default values to the database."); + logger.debug("ConfigurationServer is saving default values to the database."); // Save default Configuration Table values List categories = Config.getCategories(); @@ -212,19 +210,19 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } _configDao.update(Config.UseSecondaryStorageVm.key(), Config.UseSecondaryStorageVm.getCategory(), "true"); - s_logger.debug("ConfigurationServer made secondary storage vm required."); + logger.debug("ConfigurationServer made secondary storage vm required."); _configDao.update(Config.SecStorageEncryptCopy.key(), Config.SecStorageEncryptCopy.getCategory(), "false"); - s_logger.debug("ConfigurationServer made secondary storage copy encrypt set to false."); + logger.debug("ConfigurationServer made secondary storage copy encrypt set to false."); _configDao.update("secstorage.secure.copy.cert", "realhostip"); - s_logger.debug("ConfigurationServer made secondary storage copy use realhostip."); + logger.debug("ConfigurationServer made secondary storage copy use realhostip."); _configDao.update("user.password.encoders.exclude", "MD5,LDAP,PLAINTEXT"); - s_logger.debug("Configuration server excluded insecure encoders"); + logger.debug("Configuration server excluded insecure encoders"); _configDao.update("user.authenticators.exclude", "PLAINTEXT"); - s_logger.debug("Configuration server excluded plaintext authenticator"); + logger.debug("Configuration server excluded plaintext authenticator"); // Save default service offerings createServiceOffering(User.UID_SYSTEM, "Small Instance", 1, 512, 500, "Small Instance", ProvisioningType.THIN, false, false, null); @@ -240,9 +238,9 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio String mountParent = getMountParent(); if (mountParent != null) { 
_configDao.update(Config.MountParent.key(), Config.MountParent.getCategory(), mountParent); - s_logger.debug("ConfigurationServer saved \"" + mountParent + "\" as mount.parent."); + logger.debug("ConfigurationServer saved \"" + mountParent + "\" as mount.parent."); } else { - s_logger.debug("ConfigurationServer could not detect mount.parent."); + logger.debug("ConfigurationServer could not detect mount.parent."); } String hostIpAdr = NetUtils.getDefaultHostIp(); @@ -258,7 +256,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio if (needUpdateHostIp) { _configDepot.createOrUpdateConfigObject(ApiServiceConfiguration.class.getSimpleName(), ApiServiceConfiguration.ManagementHostIPAdr, hostIpAdr); - s_logger.debug("ConfigurationServer saved \"" + hostIpAdr + "\" as host."); + logger.debug("ConfigurationServer saved \"" + hostIpAdr + "\" as host."); } } @@ -362,7 +360,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } txn.commit(); } catch (Exception e) { - s_logger.warn("Unable to init template " + id + " datails: " + name, e); + logger.warn("Unable to init template " + id + " datails: " + name, e); throw new CloudRuntimeException("Unable to init template " + id + " datails: " + name); } } @@ -409,7 +407,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } } } catch (Exception e) { - s_logger.debug("initiateXenServerPVDriverVersion failed due to " + e.toString()); + logger.debug("initiateXenServerPVDriverVersion failed due to " + e.toString()); // ignore } } @@ -455,7 +453,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio try(final FileInputStream finputstream = new FileInputStream(propsFile);) { props.load(finputstream); }catch (IOException e) { - s_logger.error("getEnvironmentProperty:Exception:" + e.getMessage()); + logger.error("getEnvironmentProperty:Exception:" + e.getMessage()); } return props.getProperty("mount.parent"); } 
@@ -477,7 +475,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.debug("Looks like system account already exists"); + logger.debug("Looks like system account already exists"); } // insert system user insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, user.default)" @@ -487,7 +485,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.debug("Looks like system user already exists"); + logger.debug("Looks like system user already exists"); } // insert admin user, but leave the account disabled until we set a @@ -504,7 +502,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.debug("Looks like admin account already exists"); + logger.debug("Looks like admin account already exists"); } // now insert the user @@ -515,7 +513,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.debug("Looks like admin user already exists"); + logger.debug("Looks like admin user already exists"); } try { @@ -546,12 +544,12 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.warn("Failed to create default security group for default admin account due to ", ex); + logger.warn("Failed to create default security group for default admin account due to ", ex); } } 
rs.close(); } catch (Exception ex) { - s_logger.warn("Failed to create default security group for default admin account due to ", ex); + logger.warn("Failed to create default security group for default admin account due to ", ex); } } }); @@ -593,7 +591,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio ou = group[i] + "." + ou; } } catch (UnknownHostException ex) { - s_logger.info("Fail to get user's domain name. Would use cloud.com. ", ex); + logger.info("Fail to get user's domain name. Would use cloud.com. ", ex); ou = "cloud.com"; } @@ -615,8 +613,8 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } protected void updateSSLKeystore() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Processing updateSSLKeyStore"); + if (logger.isInfoEnabled()) { + logger.info("Processing updateSSLKeyStore"); } String dbString = _configDao.getValue("ssl.keystore"); @@ -634,19 +632,19 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio boolean dbExisted = (dbString != null && !dbString.isEmpty()); - s_logger.info("SSL keystore located at " + keystorePath); + logger.info("SSL keystore located at " + keystorePath); try { if (!dbExisted && null != confFile) { if (!keystoreFile.exists()) { generateDefaultKeystore(keystorePath); - s_logger.info("Generated SSL keystore."); + logger.info("Generated SSL keystore."); } String base64Keystore = getBase64Keystore(keystorePath); ConfigurationVO configVO = new ConfigurationVO("Hidden", "DEFAULT", "management-server", "ssl.keystore", base64Keystore, "SSL Keystore for the management servers"); _configDao.persist(configVO); - s_logger.info("Stored SSL keystore to database."); + logger.info("Stored SSL keystore to database."); } else { // !keystoreFile.exists() and dbExisted // Export keystore to local file byte[] storeBytes = Base64.decodeBase64(dbString); @@ -670,10 +668,10 @@ public class ConfigurationServerImpl extends ManagerBase 
implements Configuratio } catch (Exception e) { throw new IOException("Fail to create keystore file!", e); } - s_logger.info("Stored database keystore to local."); + logger.info("Stored database keystore to local."); } } catch (Exception ex) { - s_logger.warn("Would use fail-safe keystore to continue.", ex); + logger.warn("Would use fail-safe keystore to continue.", ex); } } @@ -698,9 +696,9 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio PreparedStatement stmt = txn.prepareAutoCloseStatement(wSql); stmt.setString(1, DBEncryptionUtil.encrypt(rpassword)); stmt.executeUpdate(); - s_logger.info("Updated systemvm password in database"); + logger.info("Updated systemvm password in database"); } catch (SQLException e) { - s_logger.error("Cannot retrieve systemvm password", e); + logger.error("Cannot retrieve systemvm password", e); } } @@ -714,7 +712,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio String username = System.getProperty("user.name"); Boolean devel = Boolean.valueOf(_configDao.getValue("developer")); if (!username.equalsIgnoreCase("cloud") && !devel) { - s_logger.warn("Systemvm keypairs could not be set. Management server should be run as cloud user, or in development mode."); + logger.warn("Systemvm keypairs could not be set. Management server should be run as cloud user, or in development mode."); return; } String already = _configDao.getValue("ssh.privatekey"); @@ -723,12 +721,12 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio throw new CloudRuntimeException("Cannot get home directory for account: " + username); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Processing updateKeyPairs"); + if (logger.isInfoEnabled()) { + logger.info("Processing updateKeyPairs"); } if (homeDir != null && homeDir.startsWith("~")) { - s_logger.error("No home directory was detected for the user '" + username + "'. 
Please check the profile of this user."); + logger.error("No home directory was detected for the user '" + username + "'. Please check the profile of this user."); throw new CloudRuntimeException("No home directory was detected for the user '" + username + "'. Please check the profile of this user."); } @@ -744,8 +742,8 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } if (already == null || already.isEmpty()) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Systemvm keypairs not found in database. Need to store them in the database"); + if (logger.isInfoEnabled()) { + logger.info("Systemvm keypairs not found in database. Need to store them in the database"); } // FIXME: take a global database lock here for safety. boolean onWindows = isOnWindows(); @@ -757,9 +755,9 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio try (DataInputStream dis = new DataInputStream(new FileInputStream(privkeyfile))) { dis.readFully(arr1); } catch (EOFException e) { - s_logger.info("[ignored] eof reached"); + logger.info("[ignored] eof reached"); } catch (Exception e) { - s_logger.error("Cannot read the private key file", e); + logger.error("Cannot read the private key file", e); throw new CloudRuntimeException("Cannot read the private key file"); } String privateKey = new String(arr1).trim(); @@ -767,9 +765,9 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio try (DataInputStream dis = new DataInputStream(new FileInputStream(pubkeyfile))) { dis.readFully(arr2); } catch (EOFException e) { - s_logger.info("[ignored] eof reached"); + logger.info("[ignored] eof reached"); } catch (Exception e) { - s_logger.warn("Cannot read the public key file", e); + logger.warn("Cannot read the public key file", e); throw new CloudRuntimeException("Cannot read the public key file"); } String publicKey = new String(arr2).trim(); @@ -791,32 +789,32 @@ public class ConfigurationServerImpl extends 
ManagerBase implements Configuratio try { PreparedStatement stmt1 = txn.prepareAutoCloseStatement(insertSql1); stmt1.executeUpdate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Private key inserted into database"); + if (logger.isDebugEnabled()) { + logger.debug("Private key inserted into database"); } } catch (SQLException ex) { - s_logger.error("SQL of the private key failed", ex); + logger.error("SQL of the private key failed", ex); throw new CloudRuntimeException("SQL of the private key failed"); } try { PreparedStatement stmt2 = txn.prepareAutoCloseStatement(insertSql2); stmt2.executeUpdate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Public key inserted into database"); + if (logger.isDebugEnabled()) { + logger.debug("Public key inserted into database"); } } catch (SQLException ex) { - s_logger.error("SQL of the public key failed", ex); + logger.error("SQL of the public key failed", ex); throw new CloudRuntimeException("SQL of the public key failed"); } } }); } else { - s_logger.info("Keypairs already in database, updating local copy"); + logger.info("Keypairs already in database, updating local copy"); updateKeyPairsOnDisk(homeDir); } - s_logger.info("Going to update systemvm iso with generated keypairs if needed"); + logger.info("Going to update systemvm iso with generated keypairs if needed"); try { injectSshKeysIntoSystemVmIsoPatch(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath()); } catch (CloudRuntimeException e) { @@ -846,7 +844,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio try { keyfile.createNewFile(); } catch (IOException e) { - s_logger.warn("Failed to create file: " + e.toString()); + logger.warn("Failed to create file: " + e.toString()); throw new CloudRuntimeException("Failed to update keypairs on disk: cannot create key file " + keyPath); } } @@ -857,10 +855,10 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio kStream.write(key.getBytes()); 
} } catch (FileNotFoundException e) { - s_logger.warn("Failed to write key to " + keyfile.getAbsolutePath()); + logger.warn("Failed to write key to " + keyfile.getAbsolutePath()); throw new CloudRuntimeException("Failed to update keypairs on disk: cannot find key file " + keyPath); } catch (IOException e) { - s_logger.warn("Failed to write key to " + keyfile.getAbsolutePath()); + logger.warn("Failed to write key to " + keyfile.getAbsolutePath()); throw new CloudRuntimeException("Failed to update keypairs on disk: cannot write to key file " + keyPath); } } @@ -871,7 +869,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio File keyDir = new File(homeDir + "/.ssh"); Boolean devel = Boolean.valueOf(_configDao.getValue("developer")); if (!keyDir.isDirectory()) { - s_logger.warn("Failed to create " + homeDir + "/.ssh for storing the SSH keypars"); + logger.warn("Failed to create " + homeDir + "/.ssh for storing the SSH keypars"); keyDir.mkdir(); } String pubKey = _configDao.getValue("ssh.publickey"); @@ -888,7 +886,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String privKeyPath) { - s_logger.info("Trying to inject public and private keys into systemvm iso"); + logger.info("Trying to inject public and private keys into systemvm iso"); String injectScript = getInjectScript(); String scriptPath = Script.findScript("", injectScript); String systemVmIsoPath = Script.findScript("", "vms/systemvm.iso"); @@ -900,9 +898,9 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } Script command = null; if(isOnWindows()) { - command = new Script("python", s_logger); + command = new Script("python", logger); } else { - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); } if (isOnWindows()) { scriptPath = scriptPath.replaceAll("\\\\" ,"/" ); @@ -916,9 +914,9 @@ public 
class ConfigurationServerImpl extends ManagerBase implements Configuratio command.add(systemVmIsoPath); final String result = command.execute(); - s_logger.info("Injected public and private keys into systemvm iso with result : " + result); + logger.info("Injected public and private keys into systemvm iso with result : " + result); if (result != null) { - s_logger.warn("Failed to inject generated public key into systemvm iso " + result); + logger.warn("Failed to inject generated public key into systemvm iso " + result); throw new CloudRuntimeException("Failed to inject generated public key into systemvm iso " + result); } } @@ -946,7 +944,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio if (already == null) { - s_logger.info("Need to store secondary storage vm copy password in the database"); + logger.info("Need to store secondary storage vm copy password in the database"); String password = PasswordGenerator.generateRandomPassword(12); final String insertSql1 = @@ -961,9 +959,9 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio try { PreparedStatement stmt1 = txn.prepareAutoCloseStatement(insertSql1); stmt1.executeUpdate(); - s_logger.debug("secondary storage vm copy password inserted into database"); + logger.debug("secondary storage vm copy password inserted into database"); } catch (SQLException ex) { - s_logger.warn("Failed to insert secondary storage vm copy password", ex); + logger.warn("Failed to insert secondary storage vm copy password", ex); } } }); @@ -974,7 +972,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio try { _configDao.update(Config.SSOKey.key(), Config.SSOKey.getCategory(), getPrivateKey()); } catch (NoSuchAlgorithmException ex) { - s_logger.error("error generating sso key", ex); + logger.error("error generating sso key", ex); } } @@ -987,14 +985,14 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio 
if(configInDB == null) { ConfigurationVO configVO = new ConfigurationVO(Config.SSVMPSK.getCategory(), "DEFAULT", Config.SSVMPSK.getComponent(), Config.SSVMPSK.key(), getPrivateKey(), Config.SSVMPSK.getDescription()); - s_logger.info("generating a new SSVM PSK. This goes to SSVM on Start"); + logger.info("generating a new SSVM PSK. This goes to SSVM on Start"); _configDao.persist(configVO); } else if (StringUtils.isEmpty(configInDB.getValue())) { - s_logger.info("updating the SSVM PSK with new value. This goes to SSVM on Start"); + logger.info("updating the SSVM PSK with new value. This goes to SSVM on Start"); _configDao.update(Config.SSVMPSK.key(), Config.SSVMPSK.getCategory(), getPrivateKey()); } } catch (NoSuchAlgorithmException ex) { - s_logger.error("error generating ssvm psk", ex); + logger.error("error generating ssvm psk", ex); } } @@ -1062,7 +1060,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } }); } catch (Exception e) { - s_logger.error("Unable to create new pod due to " + e.getMessage(), e); + logger.error("Unable to create new pod due to " + e.getMessage(), e); throw new InternalErrorException("Failed to create new pod. 
Please contact Cloud Support."); } @@ -1171,7 +1169,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultSharedSGNetworkOffering.getId(), service, defaultSharedSGNetworkOfferingProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #2 @@ -1186,7 +1184,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultSharedNetworkOffering.getId(), service, defaultSharedNetworkOfferingProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #3 @@ -1203,7 +1201,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio new NetworkOfferingServiceMapVO(defaultIsolatedSourceNatEnabledNetworkOffering.getId(), service, defaultIsolatedSourceNatEnabledNetworkOfferingProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #4 @@ -1218,7 +1216,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultIsolatedEnabledNetworkOffering.getId(), service, defaultIsolatedNetworkOfferingProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering 
#5 @@ -1234,7 +1232,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNetscalerNetworkOffering.getId(), service, netscalerServiceProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #6 @@ -1262,7 +1260,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNetworkOfferingForVpcNetworks.getId(), entry.getKey(), entry.getValue()); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #7 @@ -1289,7 +1287,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNetworkOfferingForVpcNetworksNoLB.getId(), entry.getKey(), entry.getValue()); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } //offering #8 - network offering with internal lb service @@ -1313,7 +1311,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio for (Service service : internalLbOffProviders.keySet()) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(internalLbOff.getId(), service, internalLbOffProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } } }); @@ -1446,7 +1444,7 @@ 
public class ConfigurationServerImpl extends ManagerBase implements Configuratio final int domainExpectedCount = domainSupportedResourceTypes.size(); if ((domainResourceCount.size() < domainExpectedCount * domains.size())) { - s_logger.debug("resource_count table has records missing for some domains...going to insert them"); + logger.debug("resource_count table has records missing for some domains...going to insert them"); for (final DomainVO domain : domains) { // Lock domain Transaction.execute(new TransactionCallbackNoReturn() { @@ -1463,7 +1461,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio for (ResourceType resourceType : domainSupportedResourceTypes) { if (!domainCountStr.contains(resourceType.toString())) { ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, domain.getId(), ResourceOwnerType.Domain); - s_logger.debug("Inserting resource count of type " + resourceType + " for domain id=" + domain.getId()); + logger.debug("Inserting resource count of type " + resourceType + " for domain id=" + domain.getId()); _resourceCountDao.persist(resourceCountVO); } } @@ -1475,7 +1473,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } if ((accountResourceCount.size() < accountExpectedCount * accounts.size())) { - s_logger.debug("resource_count table has records missing for some accounts...going to insert them"); + logger.debug("resource_count table has records missing for some accounts...going to insert them"); for (final AccountVO account : accounts) { // lock account Transaction.execute(new TransactionCallbackNoReturn() { @@ -1492,7 +1490,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio for (ResourceType resourceType : accountSupportedResourceTypes) { if (!accountCountStr.contains(resourceType.toString())) { ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, account.getId(), ResourceOwnerType.Account); - 
s_logger.debug("Inserting resource count of type " + resourceType + " for account id=" + account.getId()); + logger.debug("Inserting resource count of type " + resourceType + " for account id=" + account.getId()); _resourceCountDao.persist(resourceCountVO); } } diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 4162bf2f146..21b24e32100 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -514,7 +514,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.GetVncPortAnswer; @@ -672,7 +671,6 @@ import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; public class ManagementServerImpl extends ManagerBase implements ManagementServer { - public static final Logger s_logger = Logger.getLogger(ManagementServerImpl.class.getName()); @Inject public AccountManager _accountMgr; @@ -887,7 +885,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public boolean start() { - s_logger.info("Startup CloudStack management server..."); + logger.info("Startup CloudStack management server..."); if (_lockMasterListener == null) { _lockMasterListener = new LockMasterListener(ManagementServerNode.getManagementServerId()); @@ -933,7 +931,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe throw new InvalidParameterValueException("privatePort is an invalid value"); } - // s_logger.debug("Checking if " + privateIp + + // logger.debug("Checking if " + privateIp + // " is a valid private IP address. 
Guest IP address is: " + // _configs.get("guest.ip.network")); // @@ -1102,8 +1100,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public Ternary, Integer>, List, Map> listHostsForMigrationOfVM(final Long vmId, final Long startIndex, final Long pageSize) { final Account caller = getCaller(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the VM"); + if (logger.isDebugEnabled()) { + logger.debug("Caller is not a root admin, permission denied to migrate the VM"); } throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!"); } @@ -1115,8 +1113,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } if (vm.getState() != State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not running, cannot migrate the vm" + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is not running, cannot migrate the vm" + vm); } final InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " + "migrate the vm with specified id"); ex.addProxyObject(vm.getUuid(), "vmId"); @@ -1124,7 +1122,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } if(_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { - s_logger.info(" Live Migration of GPU enabled VM : " + vm.getInstanceName()+ " is not supported"); + logger.info(" Live Migration of GPU enabled VM : " + vm.getInstanceName()+ " is not supported"); // Return empty list. 
return new Ternary, Integer>, List, Map>(new Pair, Integer>(new ArrayList(), new Integer(0)), new ArrayList(), new HashMap()); @@ -1133,8 +1131,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM) && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv) && !vm.getHypervisorType().equals(HypervisorType.LXC) && !vm.getHypervisorType().equals(HypervisorType.Simulator) && !vm.getHypervisorType().equals(HypervisorType.Ovm3)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv/Ovm3, cannot migrate this VM."); + if (logger.isDebugEnabled()) { + logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv/Ovm3, cannot migrate this VM."); } throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support " + "XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only"); } @@ -1146,8 +1144,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final long srcHostId = vm.getHostId(); final Host srcHost = _hostDao.findById(srcHostId); if (srcHost == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find the host with id: " + srcHostId + " of this VM:" + vm); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find the host with id: " + srcHostId + " of this VM:" + vm); } final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the host (with specified id) of VM with specified id"); ex.addProxyObject(String.valueOf(srcHostId), "hostId"); @@ -1212,8 +1210,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null); } else { final Long cluster = 
srcHost.getClusterId(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Searching for all hosts in cluster " + cluster + " for migrating VM " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Searching for all hosts in cluster " + cluster + " for migrating VM " + vm); } allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, null, null, null, null, null); // Filter out the current host. @@ -1248,11 +1246,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (suitableHosts.isEmpty()) { - s_logger.debug("No suitable hosts found"); + logger.debug("No suitable hosts found"); } else { - s_logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts); + logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts); } } @@ -1279,8 +1277,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public Pair, List> listStoragePoolsForMigrationOfVolume(final Long volumeId) { final Account caller = getCaller(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the volume"); + if (logger.isDebugEnabled()) { + logger.debug("Caller is not a root admin, permission denied to migrate the volume"); } throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume"); } @@ -1298,12 +1296,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe // Volume must be in Ready state to be migrated. 
if (!Volume.State.Ready.equals(volume.getState())) { - s_logger.info("Volume " + volume + " must be in ready state for migration."); + logger.info("Volume " + volume + " must be in ready state for migration."); return new Pair, List>(allPools, suitablePools); } if (!_volumeMgr.volumeOnSharedStoragePool(volume)) { - s_logger.info("Volume " + volume + " is on local storage. It cannot be migrated to another pool."); + logger.info("Volume " + volume + " is on local storage. It cannot be migrated to another pool."); return new Pair, List>(allPools, suitablePools); } @@ -1314,11 +1312,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } if (vm == null) { - s_logger.info("Volume " + volume + " isn't attached to any vm. Looking for storage pools in the " + "zone to which this volumes can be migrated."); + logger.info("Volume " + volume + " isn't attached to any vm. Looking for storage pools in the " + "zone to which this volumes can be migrated."); } else if (vm.getState() != State.Running) { - s_logger.info("Volume " + volume + " isn't attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated."); + logger.info("Volume " + volume + " isn't attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated."); } else { - s_logger.info("Volume " + volume + " is attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated."); + logger.info("Volume " + volume + " is attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated."); boolean storageMotionSupported = false; // Check if the underlying hypervisor supports storage motion. 
final Long hostId = vm.getHostId(); @@ -1328,18 +1326,18 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (host != null) { capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(), host.getHypervisorVersion()); } else { - s_logger.error("Details of the host on which the vm " + vm + ", to which volume " + volume + " is " + "attached, couldn't be retrieved."); + logger.error("Details of the host on which the vm " + vm + ", to which volume " + volume + " is " + "attached, couldn't be retrieved."); } if (capabilities != null) { storageMotionSupported = capabilities.isStorageMotionSupported(); } else { - s_logger.error("Capabilities for host " + host + " couldn't be retrieved."); + logger.error("Capabilities for host " + host + " couldn't be retrieved."); } } if (!storageMotionSupported) { - s_logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion."); + logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion."); return new Pair, List>(allPools, suitablePools); } } @@ -1719,10 +1717,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe configVo.setValue(key.valueIn(id).toString()); configVOList.add(configVo); } else { - s_logger.warn("ConfigDepot could not find parameter " + param.getName() + " for scope " + scope); + logger.warn("ConfigDepot could not find parameter " + param.getName() + " for scope " + scope); } } else { - s_logger.warn("Configuration item " + param.getName() + " not found in " + scope); + logger.warn("Configuration item " + param.getName() + " not found in " + scope); } } @@ -2224,12 +2222,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public Pair getVncPort(final VirtualMachine vm) { if (vm.getHostId() == null) { - s_logger.warn("VM " + vm.getHostName() + " does not 
have host, return -1 for its VNC port"); + logger.warn("VM " + vm.getHostName() + " does not have host, return -1 for its VNC port"); return new Pair(null, -1); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Trying to retrieve VNC port from agent about VM " + vm.getHostName()); + if (logger.isTraceEnabled()) { + logger.trace("Trying to retrieve VNC port from agent about VM " + vm.getHostName()); } final GetVncPortAnswer answer = (GetVncPortAnswer)_agentMgr.easySend(vm.getHostId(), new GetVncPortCommand(vm.getId(), vm.getInstanceName())); @@ -3027,30 +3025,30 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe try { final GlobalLock lock = GlobalLock.getInternLock("EventPurge"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } try { final Calendar purgeCal = Calendar.getInstance(); purgeCal.add(Calendar.DAY_OF_YEAR, -_purgeDelay); final Date purgeTime = purgeCal.getTime(); - s_logger.debug("Deleting events older than: " + purgeTime.toString()); + logger.debug("Deleting events older than: " + purgeTime.toString()); final List oldEvents = _eventDao.listOlderEvents(purgeTime); - s_logger.debug("Found " + oldEvents.size() + " events to be purged"); + logger.debug("Found " + oldEvents.size() + " events to be purged"); for (final EventVO event : oldEvents) { _eventDao.expunge(event.getId()); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } finally { lock.unlock(); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } } } @@ -3061,30 +3059,30 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe try { final GlobalLock lock = GlobalLock.getInternLock("AlertPurge"); if (lock == null) { - s_logger.debug("Couldn't get 
the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } try { final Calendar purgeCal = Calendar.getInstance(); purgeCal.add(Calendar.DAY_OF_YEAR, -_alertPurgeDelay); final Date purgeTime = purgeCal.getTime(); - s_logger.debug("Deleting alerts older than: " + purgeTime.toString()); + logger.debug("Deleting alerts older than: " + purgeTime.toString()); final List oldAlerts = _alertDao.listOlderAlerts(purgeTime); - s_logger.debug("Found " + oldAlerts.size() + " events to be purged"); + logger.debug("Found " + oldAlerts.size() + " events to be purged"); for (final AlertVO alert : oldAlerts) { _alertDao.expunge(alert.getId()); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } finally { lock.unlock(); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } } } @@ -3291,8 +3289,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe private String signRequest(final String request, final String key) { try { - s_logger.info("Request: " + request); - s_logger.info("Key: " + key); + logger.info("Request: " + request); + logger.info("Key: " + key); if (key != null && request != null) { final Mac mac = Mac.getInstance("HmacSHA1"); @@ -3303,7 +3301,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return new String(Base64.encodeBase64(encryptedBytes)); } } catch (final Exception ex) { - s_logger.error("unable to sign request", ex); + logger.error("unable to sign request", ex); } return null; } @@ -3336,7 +3334,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final String input = cloudIdentifier; signature = signRequest(input, secretKey); } catch (final Exception e) { - s_logger.warn("Exception whilst creating a signature:" + e); + 
logger.warn("Exception whilst creating a signature:" + e); } final ArrayList cloudParams = new ArrayList(); @@ -3751,8 +3749,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public void doInTransactionWithoutResult(final TransactionStatus status) { for (final HostVO h : hosts) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Changing password for host name = " + h.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Changing password for host name = " + h.getName()); } // update password for this host final DetailVO nv = _detailsDao.findDetail(h.getId(), ApiConstants.USERNAME); @@ -3807,8 +3805,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Changing password for host name = " + host.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Changing password for host name = " + host.getName()); } // update password for this host final DetailVO nv = _detailsDao.findDetail(host.getId(), ApiConstants.USERNAME); @@ -3839,9 +3837,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } return eventTypes; } catch (final IllegalArgumentException e) { - s_logger.error("Error while listing Event Types", e); + logger.error("Error while listing Event Types", e); } catch (final IllegalAccessException e) { - s_logger.error("Error while listing Event Types", e); + logger.error("Error while listing Event Types", e); } return null; } @@ -3974,7 +3972,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final UserVO adminUser = _userDao.getUser(2); if (adminUser == null) { final String msg = "CANNOT find admin user"; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } if (adminUser.getState() == 
Account.State.disabled) { @@ -3991,7 +3989,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe adminUser.setPassword(encodedPassword); adminUser.setState(Account.State.enabled); _userDao.persist(adminUser); - s_logger.info("Admin user enabled"); + logger.info("Admin user enabled"); } } @@ -4008,8 +4006,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public void cleanupVMReservations() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing cleanupVMReservations"); + if (logger.isDebugEnabled()) { + logger.debug("Processing cleanupVMReservations"); } _dpMgr.cleanupVMReservations(); diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java index 9f3c8cb89ab..0be8af6a8e8 100644 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -34,7 +34,6 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; import org.apache.cloudstack.utils.usage.UsageUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -139,7 +138,6 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } } - public static final Logger s_logger = Logger.getLogger(StatsCollector.class.getName()); private static StatsCollector s_instance = null; @@ -268,7 +266,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc try { externalStatsType = externalStatsProtocol.valueOf(scheme.toUpperCase()); } catch (IllegalArgumentException e) { - s_logger.info(scheme + " is not a valid protocol for external statistics. No statistics will be send."); + logger.info(scheme + " is not a valid protocol for external statistics. 
No statistics will be send."); } externalStatsHost = uri.getHost(); @@ -284,7 +282,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc externalStatsEnabled = true; } catch (URISyntaxException e) { - s_logger.debug("Failed to parse external statistics URI: " + e.getMessage()); + logger.debug("Failed to parse external statistics URI: " + e.getMessage()); } } @@ -346,7 +344,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc _dailyOrHourly = false; } if (_usageAggregationRange < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); + logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); _usageAggregationRange = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; } _diskStatsUpdateExecutor.scheduleAtFixedRate(new VmDiskStatsUpdaterTask(), (endDate - System.currentTimeMillis()), (_usageAggregationRange * 60 * 1000), @@ -358,7 +356,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc @Override protected void runInContext() { try { - s_logger.debug("HostStatsCollector is running..."); + logger.debug("HostStatsCollector is running..."); SearchCriteria sc = _hostDao.createSearchCriteria(); sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString()); @@ -381,7 +379,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc if (stats != null) { hostStats.put(host.getId(), stats); } else { - s_logger.warn("Received invalid host stats for host: " + host.getId()); + logger.warn("Received invalid host stats for host: " + host.getId()); } } _hostStats = hostStats; @@ -405,7 +403,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } hostIds = _hostGpuGroupsDao.listHostIds(); } catch (Throwable t) { - s_logger.error("Error trying to 
retrieve host stats", t); + logger.error("Error trying to retrieve host stats", t); } } } @@ -414,7 +412,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc @Override protected void runInContext() { try { - s_logger.debug("VmStatsCollector is running..."); + logger.debug("VmStatsCollector is running..."); SearchCriteria sc = _hostDao.createSearchCriteria(); sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString()); @@ -498,13 +496,13 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc externalStatsPort = 2003; } - s_logger.debug("Sending VmStats of host " + host.getId() + " to Graphite host " + externalStatsHost + ":" + externalStatsPort); + logger.debug("Sending VmStats of host " + host.getId() + " to Graphite host " + externalStatsHost + ":" + externalStatsPort); try { GraphiteClient g = new GraphiteClient(externalStatsHost, externalStatsPort); g.sendMetrics(metrics); } catch (GraphiteException e) { - s_logger.debug("Failed sending VmStats to Graphite host " + externalStatsHost + ":" + externalStatsPort + ": " + e.getMessage()); + logger.debug("Failed sending VmStats to Graphite host " + externalStatsHost + ":" + externalStatsPort + ": " + e.getMessage()); } metrics.clear(); @@ -513,13 +511,13 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } } catch (Exception e) { - s_logger.debug("Failed to get VM stats for host with ID: " + host.getId()); + logger.debug("Failed to get VM stats for host with ID: " + host.getId()); continue; } } } catch (Throwable t) { - s_logger.error("Error trying to retrieve VM stats", t); + logger.error("Error trying to retrieve VM stats", t); } } } @@ -538,7 +536,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc //msHost in UP state with min id should run the job ManagementServerHostVO msHost = _msHostDao.findOneInUpState(new Filter(ManagementServerHostVO.class, "id", true, 0L, 1L)); if (msHost == 
null || (msHost.getMsid() != mgmtSrvrId)) { - s_logger.debug("Skipping aggregate disk stats update"); + logger.debug("Skipping aggregate disk stats update"); scanLock.unlock(); return; } @@ -558,17 +556,17 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc _vmDiskStatsDao.update(stat.getId(), stat); } } - s_logger.debug("Successfully updated aggregate vm disk stats"); + logger.debug("Successfully updated aggregate vm disk stats"); } }); } catch (Exception e) { - s_logger.debug("Failed to update aggregate disk stats", e); + logger.debug("Failed to update aggregate disk stats", e); } finally { scanLock.unlock(); } } } catch (Exception e) { - s_logger.debug("Exception while trying to acquire disk stats lock", e); + logger.debug("Exception while trying to acquire disk stats lock", e); } finally { scanLock.releaseRef(); } @@ -623,12 +621,12 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc if ((vmDiskStat.getBytesRead() == 0) && (vmDiskStat.getBytesWrite() == 0) && (vmDiskStat.getIORead() == 0) && (vmDiskStat.getIOWrite() == 0)) { - s_logger.debug("IO/bytes read and write are all 0. Not updating vm_disk_statistics"); + logger.debug("IO/bytes read and write are all 0. 
Not updating vm_disk_statistics"); continue; } if (vmDiskStat_lock == null) { - s_logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + + logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + " and volumeId:" + volume.getId()); continue; } @@ -637,15 +635,15 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc ((previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) || (previousVmDiskStats.getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite()) || (previousVmDiskStats.getCurrentIORead() != vmDiskStat_lock.getCurrentIORead()) || (previousVmDiskStats.getCurrentIOWrite() != vmDiskStat_lock.getCurrentIOWrite()))) { - s_logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + + logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Read(Bytes): " + vmDiskStat.getBytesRead() + " write(Bytes): " + vmDiskStat.getBytesWrite() + " Read(IO): " + vmDiskStat.getIORead() + " write(IO): " + vmDiskStat.getIOWrite()); continue; } if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Read # of bytes that's less than the last one. " + + if (logger.isDebugEnabled()) { + logger.debug("Read # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . 
VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getBytesRead() + " Stored: " + vmDiskStat_lock.getCurrentBytesRead()); } @@ -653,8 +651,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Write # of bytes that's less than the last one. " + + if (logger.isDebugEnabled()) { + logger.debug("Write # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getBytesWrite() + " Stored: " + vmDiskStat_lock.getCurrentBytesWrite()); } @@ -662,8 +660,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } vmDiskStat_lock.setCurrentBytesWrite(vmDiskStat.getBytesWrite()); if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Read # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + + if (logger.isDebugEnabled()) { + logger.debug("Read # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); } @@ -671,8 +669,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Write # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + + if (logger.isDebugEnabled()) { + logger.debug("Write # of IO that's less than the last one. 
" + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); } @@ -695,7 +693,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } }); } catch (Exception e) { - s_logger.warn("Error while collecting vm disk stats from hosts", e); + logger.warn("Error while collecting vm disk stats from hosts", e); } } } @@ -704,8 +702,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc @Override protected void runInContext() { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("StorageCollector is running..."); + if (logger.isDebugEnabled()) { + logger.debug("StorageCollector is running..."); } List stores = _dataStoreMgr.listImageStores(); @@ -718,14 +716,14 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc GetStorageStatsCommand command = new GetStorageStatsCommand(store.getTO()); EndPoint ssAhost = _epSelector.select(store); if (ssAhost == null) { - s_logger.debug("There is no secondary storage VM for secondary storage host " + store.getName()); + logger.debug("There is no secondary storage VM for secondary storage host " + store.getName()); continue; } long storeId = store.getId(); Answer answer = ssAhost.sendMessage(command); if (answer != null && answer.getResult()) { storageStats.put(storeId, (StorageStats)answer); - s_logger.trace("HostId: " + storeId + " Used: " + ((StorageStats)answer).getByteUsed() + " Total Available: " + + logger.trace("HostId: " + storeId + " Used: " + ((StorageStats)answer).getByteUsed() + " Total Available: " + ((StorageStats)answer).getCapacityBytes()); } } @@ -752,14 +750,14 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } } } catch (StorageUnavailableException e) { - s_logger.info("Unable to reach " + pool, e); + logger.info("Unable to reach " + pool, 
e); } catch (Exception e) { - s_logger.warn("Unable to get stats for " + pool, e); + logger.warn("Unable to get stats for " + pool, e); } } _storagePoolStats = storagePoolStats; } catch (Throwable t) { - s_logger.error("Error trying to retrieve storage stats", t); + logger.error("Error trying to retrieve storage stats", t); } } } @@ -768,8 +766,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc @Override protected void runInContext() { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("AutoScaling Monitor is running..."); + if (logger.isDebugEnabled()) { + logger.debug("AutoScaling Monitor is running..."); } // list all AS VMGroups List asGroups = _asGroupDao.listAll(); @@ -796,8 +794,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc _asGroupDao.persist(asGroup); // collect RRDs data for this group - if (s_logger.isDebugEnabled()) { - s_logger.debug("[AutoScale] Collecting RRDs data..."); + if (logger.isDebugEnabled()) { + logger.debug("[AutoScale] Collecting RRDs data..."); } Map params = new HashMap(); List asGroupVmVOs = _asGroupVmDao.listByGroup(asGroup.getId()); @@ -838,10 +836,10 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc try { Answer answer = _agentMgr.send(receiveHost, perfMon); if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to send data to node !"); + logger.debug("Failed to send data to node !"); } else { String result = answer.getDetails(); - s_logger.debug("[AutoScale] RRDs collection answer: " + result); + logger.debug("[AutoScale] RRDs collection answer: " + result); HashMap avgCounter = new HashMap(); // extract data @@ -887,7 +885,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc String scaleAction = getAutoscaleAction(avgCounter, asGroup.getId(), currentVM, params); if (scaleAction != null) { - s_logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " 
+ asGroup.getId()); + logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); if (scaleAction.equals("scaleup")) { _asManager.doScaleUp(asGroup.getId(), 1); } else { @@ -905,7 +903,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } } catch (Throwable t) { - s_logger.error("Error trying to monitor autoscaling", t); + logger.error("Error trying to monitor autoscaling", t); } } diff --git a/server/src/com/cloud/storage/ImageStoreUploadMonitorImpl.java b/server/src/com/cloud/storage/ImageStoreUploadMonitorImpl.java index 167f19f7cc8..19ce9f02ec1 100755 --- a/server/src/com/cloud/storage/ImageStoreUploadMonitorImpl.java +++ b/server/src/com/cloud/storage/ImageStoreUploadMonitorImpl.java @@ -28,7 +28,6 @@ import javax.naming.ConfigurationException; import com.cloud.configuration.Resource; import com.cloud.user.ResourceLimitService; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -78,7 +77,6 @@ import com.cloud.utils.fsm.StateMachine2; @Local(value = {ImageStoreUploadMonitor.class}) public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageStoreUploadMonitor, Listener, Configurable { - static final Logger s_logger = Logger.getLogger(ImageStoreUploadMonitorImpl.class); @Inject private VolumeDao _volumeDao; @@ -186,12 +184,12 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto DataStore dataStore = storeMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl()); if (ep == null) { - s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); + logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); continue; } VolumeVO 
volume = _volumeDao.findById(volumeDataStore.getVolumeId()); if (volume == null) { - s_logger.warn("Volume with id " + volumeDataStore.getVolumeId() + " not found"); + logger.warn("Volume with id " + volumeDataStore.getVolumeId() + " not found"); continue; } Host host = _hostDao.findById(ep.getId()); @@ -202,11 +200,11 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto try { answer = ep.sendMessage(cmd); } catch (CloudRuntimeException e) { - s_logger.warn("Unable to get upload status for volume " + volume.getUuid() + ". Error details: " + e.getMessage()); + logger.warn("Unable to get upload status for volume " + volume.getUuid() + ". Error details: " + e.getMessage()); answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { - s_logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume " + volumeDataStore.getVolumeId()); + logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume " + volumeDataStore.getVolumeId()); continue; } handleVolumeStatusResponse((UploadStatusAnswer)answer, volume, volumeDataStore); @@ -216,9 +214,9 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto handleVolumeStatusResponse(new UploadStatusAnswer(cmd, UploadStatus.ERROR, error), volume, volumeDataStore); } } catch (Throwable th) { - s_logger.warn("Exception while checking status for uploaded volume " + volumeDataStore.getExtractUrl() + ". Error details: " + th.getMessage()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Exception details: ", th); + logger.warn("Exception while checking status for uploaded volume " + volumeDataStore.getExtractUrl() + ". 
Error details: " + th.getMessage()); + if (logger.isTraceEnabled()) { + logger.trace("Exception details: ", th); } } } @@ -230,12 +228,12 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto DataStore dataStore = storeMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl()); if (ep == null) { - s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); + logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); continue; } VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId()); if (template == null) { - s_logger.warn("Template with id " + templateDataStore.getTemplateId() + " not found"); + logger.warn("Template with id " + templateDataStore.getTemplateId() + " not found"); continue; } Host host = _hostDao.findById(ep.getId()); @@ -246,11 +244,11 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto try { answer = ep.sendMessage(cmd); } catch (CloudRuntimeException e) { - s_logger.warn("Unable to get upload status for template " + template.getUuid() + ". Error details: " + e.getMessage()); + logger.warn("Unable to get upload status for template " + template.getUuid() + ". 
Error details: " + e.getMessage()); answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { - s_logger.warn("No or invalid answer corresponding to UploadStatusCommand for template " + templateDataStore.getTemplateId()); + logger.warn("No or invalid answer corresponding to UploadStatusCommand for template " + templateDataStore.getTemplateId()); continue; } handleTemplateStatusResponse((UploadStatusAnswer)answer, template, templateDataStore); @@ -260,9 +258,9 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto handleTemplateStatusResponse(new UploadStatusAnswer(cmd, UploadStatus.ERROR, error), template, templateDataStore); } } catch (Throwable th) { - s_logger.warn("Exception while checking status for uploaded template " + templateDataStore.getExtractUrl() + ". Error details: " + th.getMessage()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Exception details: ", th); + logger.warn("Exception while checking status for uploaded template " + templateDataStore.getExtractUrl() + ". 
Error details: " + th.getMessage()); + if (logger.isTraceEnabled()) { + logger.trace("Exception details: ", th); } } } @@ -291,8 +289,8 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto stateMachine.transitTo(tmpVolume, Event.OperationSucceeded, null, _volumeDao); _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), Resource.ResourceType.secondary_storage, answer.getVirtualSize()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + tmpVolume.getUuid() + " uploaded successfully"); + if (logger.isDebugEnabled()) { + logger.debug("Volume " + tmpVolume.getUuid() + " uploaded successfully"); } break; case IN_PROGRESS: @@ -305,8 +303,8 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"); + if (logger.isDebugEnabled()) { + logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"); } } else { tmpVolumeDataStore.setDownloadPercent(answer.getDownloadPercent()); @@ -317,8 +315,8 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload. Error details: " + answer.getDetails()); + if (logger.isDebugEnabled()) { + logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload. 
Error details: " + answer.getDetails()); } break; case UNKNOWN: @@ -327,8 +325,8 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationTimeout, null, _volumeDao); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"); + if (logger.isDebugEnabled()) { + logger.debug("Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"); } } } @@ -336,7 +334,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto } _volumeDataStoreDao.update(tmpVolumeDataStore.getId(), tmpVolumeDataStore); } catch (NoTransitionException e) { - s_logger.error("Unexpected error " + e.getMessage()); + logger.error("Unexpected error " + e.getMessage()); } } }); @@ -366,8 +364,8 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationSucceeded, null, _templateDao); _resourceLimitMgr.incrementResourceCount(template.getAccountId(), Resource.ResourceType.secondary_storage, answer.getVirtualSize()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template " + tmpTemplate.getUuid() + " uploaded successfully"); + if (logger.isDebugEnabled()) { + logger.debug("Template " + tmpTemplate.getUuid() + " uploaded successfully"); } break; case IN_PROGRESS: @@ -380,8 +378,8 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template " + 
tmpTemplate.getUuid() + " failed to upload due to operation timed out"); + if (logger.isDebugEnabled()) { + logger.debug("Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"); } } else { tmpTemplateDataStore.setDownloadPercent(answer.getDownloadPercent()); @@ -392,8 +390,8 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template " + tmpTemplate.getUuid() + " failed to upload. Error details: " + answer.getDetails()); + if (logger.isDebugEnabled()) { + logger.debug("Template " + tmpTemplate.getUuid() + " failed to upload. Error details: " + answer.getDetails()); } break; case UNKNOWN: @@ -402,8 +400,8 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationTimeout, null, _templateDao); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"); + if (logger.isDebugEnabled()) { + logger.debug("Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"); } } } @@ -411,7 +409,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto } _templateDataStoreDao.update(tmpTemplateDataStore.getId(), tmpTemplateDataStore); } catch (NoTransitionException e) { - s_logger.error("Unexpected error " + e.getMessage()); + logger.error("Unexpected error " + e.getMessage()); } } }); diff --git a/server/src/com/cloud/storage/OCFS2ManagerImpl.java 
b/server/src/com/cloud/storage/OCFS2ManagerImpl.java index 017ce7fac83..ff24947983f 100644 --- a/server/src/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/com/cloud/storage/OCFS2ManagerImpl.java @@ -25,7 +25,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -55,7 +54,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {OCFS2Manager.class}) public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, ResourceListener { - private static final Logger s_logger = Logger.getLogger(OCFS2ManagerImpl.class); @Inject ClusterDetailsDao _clusterDetailsDao; @@ -109,11 +107,11 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou for (HostVO h : hosts) { Answer ans = _agentMgr.easySend(h.getId(), cmd); if (ans == null) { - s_logger.debug("Host " + h.getId() + " is not in UP state, skip preparing OCFS2 node on it"); + logger.debug("Host " + h.getId() + " is not in UP state, skip preparing OCFS2 node on it"); continue; } if (!ans.getResult()) { - s_logger.warn("PrepareOCFS2NodesCommand failed on host " + h.getId() + " " + ans.getDetails()); + logger.warn("PrepareOCFS2NodesCommand failed on host " + h.getId() + " " + ans.getDetails()); return false; } } @@ -154,7 +152,7 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); List hosts = sc.list(); if (hosts.isEmpty()) { - s_logger.debug("There is no host in cluster " + clusterId + ", no need to prepare OCFS2 nodes"); + logger.debug("There is no host in cluster " + clusterId + ", no need to prepare OCFS2 nodes"); return true; } @@ -202,10 +200,10 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou if (hasOcfs2) { try { if 
(!prepareNodes(host.getClusterId())) { - s_logger.warn(errMsg); + logger.warn(errMsg); } } catch (Exception e) { - s_logger.error(errMsg, e); + logger.error(errMsg, e); } } } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index ba39e1f0fa8..ffcbf861e42 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -43,7 +43,6 @@ import javax.naming.ConfigurationException; import com.cloud.hypervisor.Hypervisor; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; @@ -191,7 +190,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Component @Local(value = {StorageManager.class, StorageService.class}) public class StorageManagerImpl extends ManagerBase implements StorageManager, ClusterManagerListener { - private static final Logger s_logger = Logger.getLogger(StorageManagerImpl.class); protected String _name; @Inject @@ -325,7 +323,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // available for (VolumeVO vol : vols) { if (vol.getRemoved() != null) { - s_logger.warn("Volume id:" + vol.getId() + " is removed, cannot share on this instance"); + logger.warn("Volume id:" + vol.getId() + " is removed, cannot share on this instance"); // not ok to share return false; } @@ -458,7 +456,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C Map configs = _configDao.getConfiguration("management-server", params); _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); - s_logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); + 
logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao), true, false, true); @@ -471,7 +469,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C String time = configs.get("storage.cleanup.interval"); _storageCleanupInterval = NumbersUtil.parseInt(time, 86400); - s_logger.info("Storage cleanup enabled: " + _storageCleanupEnabled + ", interval: " + _storageCleanupInterval + ", template cleanup enabled: " + + logger.info("Storage cleanup enabled: " + _storageCleanupEnabled + ", interval: " + _storageCleanupInterval + ", template cleanup enabled: " + _templateCleanupEnabled); String cleanupInterval = configs.get("extract.url.cleanup.interval"); @@ -530,7 +528,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C int initialDelay = generator.nextInt(_storageCleanupInterval); _executor.scheduleWithFixedDelay(new StorageGarbageCollector(), initialDelay, _storageCleanupInterval, TimeUnit.SECONDS); } else { - s_logger.debug("Storage cleanup is not enabled, so the storage cleanup thread is not being scheduled."); + logger.debug("Storage cleanup is not enabled, so the storage cleanup thread is not being scheduled."); } _executor.scheduleWithFixedDelay(new DownloadURLGarbageCollector(), _downloadUrlCleanupInterval, _downloadUrlCleanupInterval, TimeUnit.SECONDS); @@ -584,7 +582,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null, pInfo.getUuid()); if (pool != null) { - s_logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still treat it as the same pool"); + logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still 
treat it as the same pool"); } } @@ -615,7 +613,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } catch (Exception e) { - s_logger.warn("Unable to setup the local storage pool for " + host, e); + logger.warn("Unable to setup the local storage pool for " + host, e); throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e); } @@ -711,7 +709,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C lifeCycle.attachZone(store, zoneScope, hypervisorType); } } catch (Exception e) { - s_logger.debug("Failed to add data store: "+e.getMessage(), e); + logger.debug("Failed to add data store: "+e.getMessage(), e); try { // clean up the db, just absorb the exception thrown in deletion with error logged, so that user can get error for adding data store // not deleting data store. @@ -719,7 +717,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C lifeCycle.deleteDataStore(store); } } catch (Exception ex) { - s_logger.debug("Failed to clean up storage pool: " + ex.getMessage()); + logger.debug("Failed to clean up storage pool: " + ex.getMessage()); } throw new CloudRuntimeException("Failed to add data store: "+e.getMessage(), e); } @@ -884,15 +882,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C StoragePoolVO sPool = _storagePoolDao.findById(id); if (sPool == null) { - s_logger.warn("Unable to find pool:" + id); + logger.warn("Unable to find pool:" + id); throw new InvalidParameterValueException("Unable to find pool by id " + id); } if (sPool.getStatus() != StoragePoolStatus.Maintenance) { - s_logger.warn("Unable to delete storage id: " + id + " due to it is not in Maintenance state"); + logger.warn("Unable to delete storage id: " + id + " due to it is not in Maintenance state"); throw new InvalidParameterValueException("Unable to delete storage due to it is not in Maintenance state, id: " + id); } if 
(sPool.isLocal()) { - s_logger.warn("Unable to delete local storage id:" + id); + logger.warn("Unable to delete local storage id:" + id); throw new InvalidParameterValueException("Unable to delete local storage id: " + id); } @@ -910,9 +908,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { future.get(); } catch (InterruptedException e) { - s_logger.debug("expunge volume failed:" + vol.getId(), e); + logger.debug("expunge volume failed:" + vol.getId(), e); } catch (ExecutionException e) { - s_logger.debug("expunge volume failed:" + vol.getId(), e); + logger.debug("expunge volume failed:" + vol.getId(), e); } } } @@ -928,14 +926,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C StoragePoolVO lock = _storagePoolDao.acquireInLockTable(sPool.getId()); if (lock == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO with ID: " + sPool.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO with ID: " + sPool.getId()); } return false; } _storagePoolDao.releaseFromLockTable(lock.getId()); - s_logger.trace("Released lock for storage pool " + id); + logger.trace("Released lock for storage pool " + id); DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(sPool.getStorageProviderName()); DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); @@ -947,7 +945,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); assert (pool.isShared()) : "Now, did you actually read the name of this method?"; - s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId); + logger.debug("Adding pool " + 
pool.getName() + " to host " + hostId); DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); @@ -973,15 +971,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // All this is for the inaccuracy of floats for big number multiplication. BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getId()); totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue(); - s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); - s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + storagePool.getCapacityBytes()); + logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + storagePool.getCapacityBytes()); } else { - s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString()); + logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString()); totalOverProvCapacity = storagePool.getCapacityBytes(); } - s_logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + totalOverProvCapacity); + logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + totalOverProvCapacity); CapacityState capacityState = CapacityState.Enabled; if (storagePool.getScope() == ScopeType.ZONE) { DataCenterVO dc = ApiDBUtils.findZoneById(storagePool.getDataCenterId()); @@ -1011,7 +1009,7 @@ public class 
StorageManagerImpl extends ManagerBase implements StorageManager, C _capacityDao.update(capacity.getId(), capacity); } } - s_logger.debug("Successfully set Capacity - " + totalOverProvCapacity + " for capacity type - " + capacityType + " , DataCenterId - " + + logger.debug("Successfully set Capacity - " + totalOverProvCapacity + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - " + storagePool.getId() + ", PodId " + storagePool.getPodId()); } @@ -1054,9 +1052,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } return new Pair(hostId, answers.toArray(new Answer[answers.size()])); } catch (AgentUnavailableException e) { - s_logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); + logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); } catch (OperationTimedoutException e) { - s_logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); + logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); } } @@ -1084,11 +1082,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { List unusedTemplatesInPool = _tmpltMgr.getUnusedTemplatesInPool(pool); - s_logger.debug("Storage pool garbage collector found " + unusedTemplatesInPool.size() + " templates to clean up in storage pool: " + + logger.debug("Storage pool garbage collector found " + unusedTemplatesInPool.size() + " templates to clean up in storage pool: " + pool.getName()); for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) { if (templatePoolVO.getDownloadState() != VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - s_logger.debug("Storage pool garbage collector is skipping template with ID: " + templatePoolVO.getTemplateId() + + logger.debug("Storage pool garbage collector is skipping template with ID: " + templatePoolVO.getTemplateId() + " on pool " 
+ templatePoolVO.getPoolId() + " because it is not completely downloaded."); continue; } @@ -1096,7 +1094,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (!templatePoolVO.getMarkedForGC()) { templatePoolVO.setMarkedForGC(true); _vmTemplatePoolDao.update(templatePoolVO.getId(), templatePoolVO); - s_logger.debug("Storage pool garbage collector has marked template with ID: " + templatePoolVO.getTemplateId() + + logger.debug("Storage pool garbage collector has marked template with ID: " + templatePoolVO.getTemplateId() + " on pool " + templatePoolVO.getPoolId() + " for garbage collection."); continue; } @@ -1104,7 +1102,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _tmpltMgr.evictTemplateFromStoragePool(templatePoolVO); } } catch (Exception e) { - s_logger.warn("Problem cleaning up primary storage pool " + pool, e); + logger.warn("Problem cleaning up primary storage pool " + pool, e); } } } @@ -1116,7 +1114,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { volService.expungeVolumeAsync(volFactory.getVolume(vol.getId())); } catch (Exception e) { - s_logger.warn("Unable to destroy volume " + vol.getUuid(), e); + logger.warn("Unable to destroy volume " + vol.getUuid(), e); } } @@ -1130,7 +1128,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } _snapshotDao.expunge(snapshotVO.getId()); } catch (Exception e) { - s_logger.warn("Unable to destroy snapshot " + snapshotVO.getUuid(), e); + logger.warn("Unable to destroy snapshot " + snapshotVO.getUuid(), e); } } @@ -1139,21 +1137,21 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C for (VolumeDataStoreVO volumeDataStore : volumeDataStores) { VolumeVO volume = _volumeDao.findById(volumeDataStore.getVolumeId()); if (volume == null) { - s_logger.warn("Uploaded volume with id " + volumeDataStore.getVolumeId() + " not found, so cannot be 
destroyed"); + logger.warn("Uploaded volume with id " + volumeDataStore.getVolumeId() + " not found, so cannot be destroyed"); continue; } try { DataStore dataStore = _dataStoreMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl()); if (ep == null) { - s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName() + ", cannot destroy uploaded volume " + volume.getUuid()); + logger.warn("There is no secondary storage VM for image store " + dataStore.getName() + ", cannot destroy uploaded volume " + volume.getUuid()); continue; } Host host = _hostDao.findById(ep.getId()); if (host != null && host.getManagementServerId() != null) { if (_serverId == host.getManagementServerId().longValue()) { if (!volService.destroyVolume(volume.getId())) { - s_logger.warn("Unable to destroy uploaded volume " + volume.getUuid()); + logger.warn("Unable to destroy uploaded volume " + volume.getUuid()); continue; } // decrement volume resource count @@ -1161,17 +1159,17 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // expunge volume from secondary if volume is on image store VolumeInfo volOnSecondary = volFactory.getVolume(volume.getId(), DataStoreRole.Image); if (volOnSecondary != null) { - s_logger.info("Expunging volume " + volume.getUuid() + " uploaded using HTTP POST from secondary data store"); + logger.info("Expunging volume " + volume.getUuid() + " uploaded using HTTP POST from secondary data store"); AsyncCallFuture future = volService.expungeVolumeAsync(volOnSecondary); VolumeApiResult result = future.get(); if (!result.isSuccess()) { - s_logger.warn("Failed to expunge volume " + volume.getUuid() + " from the image store " + dataStore.getName() + " due to: " + result.getResult()); + logger.warn("Failed to expunge volume " + volume.getUuid() + " from the image store " + dataStore.getName() + " due to: " + 
result.getResult()); } } } } } catch (Throwable th) { - s_logger.warn("Unable to destroy uploaded volume " + volume.getUuid() + ". Error details: " + th.getMessage()); + logger.warn("Unable to destroy uploaded volume " + volume.getUuid() + ". Error details: " + th.getMessage()); } } @@ -1180,14 +1178,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C for (TemplateDataStoreVO templateDataStore : templateDataStores) { VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId()); if (template == null) { - s_logger.warn("Uploaded template with id " + templateDataStore.getTemplateId() + " not found, so cannot be destroyed"); + logger.warn("Uploaded template with id " + templateDataStore.getTemplateId() + " not found, so cannot be destroyed"); continue; } try { DataStore dataStore = _dataStoreMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl()); if (ep == null) { - s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName() + ", cannot destroy uploaded template " + template.getUuid()); + logger.warn("There is no secondary storage VM for image store " + dataStore.getName() + ", cannot destroy uploaded template " + template.getUuid()); continue; } Host host = _hostDao.findById(ep.getId()); @@ -1196,7 +1194,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C AsyncCallFuture future = _imageSrv.deleteTemplateAsync(tmplFactory.getTemplate(template.getId(), dataStore)); TemplateApiResult result = future.get(); if (!result.isSuccess()) { - s_logger.warn("Failed to delete template " + template.getUuid() + " from the image store " + dataStore.getName() + " due to: " + result.getResult()); + logger.warn("Failed to delete template " + template.getUuid() + " from the image store " + dataStore.getName() + " due to: " + result.getResult()); continue; } // remove from 
template_zone_ref @@ -1220,7 +1218,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } } catch (Throwable th) { - s_logger.warn("Unable to destroy uploaded template " + template.getUuid() + ". Error details: " + th.getMessage()); + logger.warn("Unable to destroy uploaded template " + template.getUuid() + ". Error details: " + th.getMessage()); } } } finally { @@ -1248,7 +1246,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } return list; } catch (Exception e) { - s_logger.debug("failed to get all volumes who has snapshots in secondary storage " + storeId + " due to " + e.getMessage()); + logger.debug("failed to get all volumes who has snapshots in secondary storage " + storeId + " due to " + e.getMessage()); return null; } @@ -1269,7 +1267,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } return list; } catch (Exception e) { - s_logger.debug("failed to get all snapshots for a volume " + volumeId + " due to " + e.getMessage()); + logger.debug("failed to get all snapshots for a volume " + volumeId + " due to " + e.getMessage()); return null; } } @@ -1286,16 +1284,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { long storeId = store.getId(); List destroyedTemplateStoreVOs = _templateStoreDao.listDestroyed(storeId); - s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + + logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + " templates to cleanup on template_store_ref for store: " + store.getName()); for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO); + if (logger.isDebugEnabled()) { + logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO); } 
_templateStoreDao.remove(destroyedTemplateStoreVO.getId()); } } catch (Exception e) { - s_logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e); + logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e); } } @@ -1303,18 +1301,18 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C for (DataStore store : imageStores) { try { List destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId()); - s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + + logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName()); for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) { // check if this snapshot has child SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store); if (snap.getChild() != null) { - s_logger.debug("Skip snapshot on store: " + destroyedSnapshotStoreVO + " , because it has child"); + logger.debug("Skip snapshot on store: " + destroyedSnapshotStoreVO + " , because it has child"); continue; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting snapshot store DB entry: " + destroyedSnapshotStoreVO); + if (logger.isDebugEnabled()) { + logger.debug("Deleting snapshot store DB entry: " + destroyedSnapshotStoreVO); } _snapshotDao.remove(destroyedSnapshotStoreVO.getSnapshotId()); @@ -1326,7 +1324,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } catch (Exception e2) { - s_logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2); + logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2); } } @@ -1335,21 +1333,21 @@ public class StorageManagerImpl extends ManagerBase implements 
StorageManager, C for (DataStore store : imageStores) { try { List destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId()); - s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + + logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName()); for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting volume store DB entry: " + destroyedStoreVO); + if (logger.isDebugEnabled()) { + logger.debug("Deleting volume store DB entry: " + destroyedStoreVO); } _volumeStoreDao.remove(destroyedStoreVO.getId()); } } catch (Exception e2) { - s_logger.warn("problem cleaning up volumes in volume_store_ref for store: " + store.getName(), e2); + logger.warn("problem cleaning up volumes in volume_store_ref for store: " + store.getName(), e2); } } } catch (Exception e3) { - s_logger.warn("problem cleaning up secondary storage DB entries. ", e3); + logger.warn("problem cleaning up secondary storage DB entries. 
", e3); } } @@ -1373,7 +1371,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (primaryStorage == null) { String msg = "Unable to obtain lock on the storage pool record in preparePrimaryStorageForMaintenance()"; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -1400,7 +1398,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (primaryStorage == null) { String msg = "Unable to obtain lock on the storage pool in cancelPrimaryStorageForMaintenance()"; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -1426,12 +1424,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override protected void runInContext() { try { - s_logger.trace("Storage Garbage Collection Thread is running."); + logger.trace("Storage Garbage Collection Thread is running."); cleanupStorage(true); } catch (Exception e) { - s_logger.error("Caught the following Exception", e); + logger.error("Caught the following Exception", e); } } } @@ -1446,7 +1444,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public void onManagementNodeLeft(List nodeList, long selfNodeId) { for (ManagementServerHost vo : nodeList) { if (vo.getMsid() == _serverId) { - s_logger.info("Cleaning up storage maintenance jobs associated with Management server: " + vo.getMsid()); + logger.info("Cleaning up storage maintenance jobs associated with Management server: " + vo.getMsid()); List poolIds = _storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid()); if (poolIds.size() > 0) { for (Long poolId : poolIds) { @@ -1610,7 +1608,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new InvalidParameterValueException("can not change old scheme:" + oldUri.getScheme() + " to " + uri.getScheme()); } } catch (URISyntaxException e) { - s_logger.debug("Failed to 
get uri from " + oldUrl); + logger.debug("Failed to get uri from " + oldUrl); } secHost.setStorageUrl(newUrl); @@ -1653,13 +1651,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (stats != null) { double usedPercentage = ((double)stats.getByteUsed() / (double)totalSize); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + + if (logger.isDebugEnabled()) { + logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + ", usedPct: " + usedPercentage + ", disable threshold: " + storageUsedThreshold); } if (usedPercentage >= storageUsedThreshold) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + + if (logger.isDebugEnabled()) { + logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + " has crossed the pool.storage.capacity.disablethreshold: " + storageUsedThreshold); } return false; @@ -1679,7 +1677,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Only IOPS guaranteed primary storage like SolidFire is using/setting IOPS. // This check returns true for storage that does not specify IOPS. 
if (pool.getCapacityIops() == null ) { - s_logger.info("Storage pool " + pool.getName() + " (" + pool.getId() + ") does not supply IOPS capacity, assuming enough capacity"); + logger.info("Storage pool " + pool.getName() + " (" + pool.getId() + ") does not supply IOPS capacity, assuming enough capacity"); return true; } @@ -1745,26 +1743,26 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (pool.getPoolType() == StoragePoolType.NetworkFilesystem || pool.getPoolType() == StoragePoolType.VMFS || pool.getPoolType() == StoragePoolType.Filesystem) { BigDecimal overProvFactor = getStorageOverProvisioningFactor(pool.getId()); totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue(); - s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); - s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes()); + logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes()); } else { totalOverProvCapacity = pool.getCapacityBytes(); - s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString()); + logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString()); } - s_logger.debug("Total capacity of the pool " + poolVO.getName() + " id: " + pool.getId() + " is " + totalOverProvCapacity); + logger.debug("Total capacity of the pool " + poolVO.getName() + " id: " + pool.getId() + " is " + totalOverProvCapacity); double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId()); - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + + if (logger.isDebugEnabled()) { + logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + storageAllocatedThreshold); } double usedPercentage = (allocatedSizeWithtemplate + totalAskingSize) / (double)(totalOverProvCapacity); if (usedPercentage > storageAllocatedThreshold) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + + if (logger.isDebugEnabled()) { + logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + " since its allocated percentage: " + usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + storageAllocatedThreshold + ", skipping this pool"); } @@ -1772,8 +1770,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (totalOverProvCapacity < (allocatedSizeWithtemplate + totalAskingSize)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + + if (logger.isDebugEnabled()) { + logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + ", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize); } @@ -1915,7 +1913,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { store = lifeCycle.initialize(params); } catch (Exception e) { - s_logger.debug("Failed to add 
data store: " + e.getMessage(), e); + logger.debug("Failed to add data store: " + e.getMessage(), e); throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } @@ -2115,7 +2113,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { store = lifeCycle.initialize(params); } catch (Exception e) { - s_logger.debug("Failed to add data store: "+e.getMessage(), e); + logger.debug("Failed to add data store: "+e.getMessage(), e); throw new CloudRuntimeException("Failed to add data store: "+e.getMessage(), e); } @@ -2174,12 +2172,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public void run() { try { - s_logger.trace("Download URL Garbage Collection Thread is running."); + logger.trace("Download URL Garbage Collection Thread is running."); cleanupDownloadUrls(); } catch (Exception e) { - s_logger.error("Caught the following Exception", e); + logger.error("Caught the following Exception", e); } } } @@ -2197,7 +2195,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C continue; } - s_logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId()); + logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId()); // Remove it from image store ImageStoreEntity secStore = (ImageStoreEntity) _dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image); @@ -2206,7 +2204,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Now expunge it from DB since this entry was created only for download purpose _volumeStoreDao.expunge(volumeOnImageStore.getId()); }catch(Throwable th){ - s_logger.warn("Caught exception while deleting download url " +volumeOnImageStore.getExtractUrl() + + logger.warn("Caught exception while deleting download url " 
+volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId(), th); } } @@ -2221,7 +2219,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C continue; } - s_logger.debug("Removing download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId()); + logger.debug("Removing download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId()); // Remove it from image store ImageStoreEntity secStore = (ImageStoreEntity) _dataStoreMgr.getDataStore(templateOnImageStore.getDataStoreId(), DataStoreRole.Image); @@ -2232,7 +2230,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C templateOnImageStore.setExtractUrlCreated(null); _templateStoreDao.update(templateOnImageStore.getId(), templateOnImageStore); }catch(Throwable th){ - s_logger.warn("caught exception while deleting download url " +templateOnImageStore.getExtractUrl() + + logger.warn("caught exception while deleting download url " +templateOnImageStore.getExtractUrl() + " for template id " +templateOnImageStore.getTemplateId(), th); } } diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java index d4e8c99fdca..3ea625deb17 100644 --- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java @@ -39,7 +39,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; import org.apache.cloudstack.utils.imagestore.ImageStoreUtil; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import 
org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; @@ -170,7 +169,6 @@ import org.joda.time.DateTime; import org.joda.time.DateTimeZone; public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService, VmWorkJobHandler { - private final static Logger s_logger = Logger.getLogger(VolumeApiServiceImpl.class); public static final String VM_WORK_JOB_HANDLER = VolumeApiServiceImpl.class.getSimpleName(); @Inject @@ -400,7 +398,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { ImageFormat.valueOf(format.toUpperCase()); } catch (IllegalArgumentException e) { - s_logger.debug("ImageFormat IllegalArgumentException: " + e.getMessage()); + logger.debug("ImageFormat IllegalArgumentException: " + e.getMessage()); throw new IllegalArgumentException("Image format: " + format + " is incorrect. Supported formats are " + EnumUtils.listValues(ImageFormat.values())); } @@ -761,8 +759,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic message.append(cmd.getVirtualMachineId()); message.append(" due to error: "); message.append(ex.getMessage()); - if (s_logger.isDebugEnabled()) { - s_logger.debug(message, ex); + if (logger.isDebugEnabled()) { + logger.debug(message, ex); } throw new CloudRuntimeException(message.toString()); } @@ -776,7 +774,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e); } finally { if (!created) { - s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); + logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, cmd.getDisplayVolume()); _resourceLimitMgr.recalculateResourceCount(volume.getAccountId(), 
volume.getDomainId(), ResourceType.primary_storage.getOrdinal()); } @@ -979,7 +977,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic /* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */ if (volume.getState() == Volume.State.Allocated) { - s_logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS."); + logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS."); volume.setSize(newSize); volume.setMinIops(newMinIops); @@ -1145,7 +1143,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.warn("Failed to resize the volume " + volume); + logger.warn("Failed to resize the volume " + volume); String details = ""; if (result.getResult() != null && !result.getResult().isEmpty()) { details = result.getResult(); @@ -1169,13 +1167,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } return volume; } catch (InterruptedException e) { - s_logger.warn("failed get resize volume result", e); + logger.warn("failed get resize volume result", e); throw new CloudRuntimeException(e.getMessage()); } catch (ExecutionException e) { - s_logger.warn("failed get resize volume result", e); + logger.warn("failed get resize volume result", e); throw new CloudRuntimeException(e.getMessage()); } catch (Exception e) { - s_logger.warn("failed get resize volume result", e); + logger.warn("failed get resize volume result", e); throw new CloudRuntimeException(e.getMessage()); } } @@ -1240,26 +1238,26 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // expunge volume from primary if volume is on primary VolumeInfo volOnPrimary = volFactory.getVolume(volume.getId(), DataStoreRole.Primary); if 
(volOnPrimary != null) { - s_logger.info("Expunging volume " + volume.getId() + " from primary data store"); + logger.info("Expunging volume " + volume.getId() + " from primary data store"); AsyncCallFuture future = volService.expungeVolumeAsync(volOnPrimary); future.get(); } // expunge volume from secondary if volume is on image store VolumeInfo volOnSecondary = volFactory.getVolume(volume.getId(), DataStoreRole.Image); if (volOnSecondary != null) { - s_logger.info("Expunging volume " + volume.getId() + " from secondary data store"); + logger.info("Expunging volume " + volume.getId() + " from secondary data store"); AsyncCallFuture future2 = volService.expungeVolumeAsync(volOnSecondary); future2.get(); } // delete all cache entries for this volume List cacheVols = volFactory.listVolumeOnCache(volume.getId()); for (VolumeInfo volOnCache : cacheVols) { - s_logger.info("Delete volume from image cache store: " + volOnCache.getDataStore().getName()); + logger.info("Delete volume from image cache store: " + volOnCache.getDataStore().getName()); volOnCache.delete(); } } catch (Exception e) { - s_logger.warn("Failed to expunge volume:", e); + logger.warn("Failed to expunge volume:", e); return false; } @@ -1320,7 +1318,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { newVolumeOnPrimaryStorage = _volumeMgr.createVolumeOnPrimaryStorage(vm, volumeToAttach, rootDiskHyperType, destPrimaryStorage); } catch (NoTransitionException e) { - s_logger.debug("Failed to create volume on primary storage", e); + logger.debug("Failed to create volume on primary storage", e); throw new CloudRuntimeException("Failed to create volume on primary storage", e); } } @@ -1341,10 +1339,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), vmRootVolumePool.getClusterId(), 
volumeToAttachHyperType); } catch (ConcurrentOperationException e) { - s_logger.debug("move volume failed", e); + logger.debug("move volume failed", e); throw new CloudRuntimeException("move volume failed", e); } catch (StorageUnavailableException e) { - s_logger.debug("move volume failed", e); + logger.debug("move volume failed", e); throw new CloudRuntimeException("move volume failed", e); } } @@ -1464,8 +1462,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (asyncExecutionContext != null) { AsyncJob job = asyncExecutionContext.getJob(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Trying to attaching volume " + volumeId + " to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status"); + if (logger.isInfoEnabled()) { + logger.info("Trying to attaching volume " + volumeId + " to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status"); } _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId); @@ -1672,8 +1670,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (asyncExecutionContext != null) { AsyncJob job = asyncExecutionContext.getJob(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Trying to attaching volume " + volumeId + "to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status"); + if (logger.isInfoEnabled()) { + logger.info("Trying to attaching volume " + volumeId + "to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status"); } _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId); @@ -1965,10 +1963,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic newVol = _volumeMgr.migrateVolume(vol, destPool); } } catch (StorageUnavailableException e) { - s_logger.debug("Failed to migrate volume", e); + logger.debug("Failed to migrate volume", e); throw new CloudRuntimeException(e.getMessage()); } catch 
(Exception e) { - s_logger.debug("Failed to migrate volume", e); + logger.debug("Failed to migrate volume", e); throw new CloudRuntimeException(e.getMessage()); } return newVol; @@ -1981,15 +1979,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("migrate volume failed:" + result.getResult()); + logger.debug("migrate volume failed:" + result.getResult()); throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId()); } return result.getVolume(); } catch (InterruptedException e) { - s_logger.debug("migrate volume failed", e); + logger.debug("migrate volume failed", e); throw new CloudRuntimeException(e.getMessage()); } catch (ExecutionException e) { - s_logger.debug("migrate volume failed", e); + logger.debug("migrate volume failed", e); throw new CloudRuntimeException(e.getMessage()); } } @@ -2160,7 +2158,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // Extract activity only for detached volumes or for volumes whose // instance is stopped if (volume.getInstanceId() != null && ApiDBUtils.findVMInstanceById(volume.getInstanceId()).getState() != State.Stopped) { - s_logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state."); + logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state."); PermissionDeniedException ex = new PermissionDeniedException( "Invalid state of the volume with specified ID. 
It should be either detached or the VM should be in stopped state."); ex.addProxyObject(volume.getUuid(), "volumeId"); @@ -2264,10 +2262,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { cvResult = cvAnswer.get(); } catch (InterruptedException e1) { - s_logger.debug("failed copy volume", e1); + logger.debug("failed copy volume", e1); throw new CloudRuntimeException("Failed to copy volume", e1); } catch (ExecutionException e1) { - s_logger.debug("failed copy volume", e1); + logger.debug("failed copy volume", e1); throw new CloudRuntimeException("Failed to copy volume", e1); } if (cvResult == null || cvResult.isFailed()) { @@ -2654,7 +2652,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); AsyncJobVO jobVo = _jobMgr.getAsyncJob(workJob.getId()); - s_logger.debug("New job " + workJob.getId() + ", result field: " + jobVo.getResult()); + logger.debug("New job " + workJob.getId() + ", result field: " + jobVo.getResult()); AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); diff --git a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java index f1937f85232..64c35b0dd01 100644 --- a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java +++ b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java @@ -26,7 +26,6 @@ import java.util.Timer; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -67,7 +66,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = {DownloadMonitor.class}) public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor { - static final Logger s_logger = 
Logger.getLogger(DownloadMonitorImpl.class); @Inject private TemplateDataStoreDao _vmTemplateStoreDao; @@ -96,7 +94,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor String cert = configs.get("secstorage.ssl.cert.domain"); if (!"realhostip.com".equalsIgnoreCase(cert)) { - s_logger.warn("Only realhostip.com ssl cert is supported, ignoring self-signed and other certs"); + logger.warn("Only realhostip.com ssl cert is supported, ignoring self-signed and other certs"); } _copyAuthPasswd = configs.get("secstorage.copy.password"); @@ -155,7 +153,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor EndPoint ep = _epSelector.select(template); if (ep == null) { String errMsg = "There is no secondary storage VM for downloading template to image store " + store.getName(); - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } DownloadListener dl = new DownloadListener(ep, store, template, _timer, this, dcmd, callback); @@ -166,14 +164,14 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor // DownloadListener to use // new ObjectInDataStore.State transition. TODO: fix this later // to be able to remove downloadState from template_store_ref. 
- s_logger.info("found existing download job"); + logger.info("found existing download job"); dl.setCurrState(vmTemplateStore.getDownloadState()); } try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - s_logger.warn("Unable to start /resume download of template " + template.getId() + " to " + store.getName(), e); + logger.warn("Unable to start /resume download of template " + template.getId() + " to " + store.getName(), e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -189,12 +187,12 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor if (template.getUri() != null) { initiateTemplateDownload(template, callback); } else { - s_logger.info("Template url is null, cannot download"); + logger.info("Template url is null, cannot download"); DownloadAnswer ans = new DownloadAnswer("Template url is null", Status.UNKNOWN); callback.complete(ans); } } else { - s_logger.info("Template download is already in progress or already downloaded"); + logger.info("Template download is already in progress or already downloaded"); DownloadAnswer ans = new DownloadAnswer("Template download is already in progress or already downloaded", Status.UNKNOWN); callback.complete(ans); @@ -239,7 +237,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor EndPoint ep = _epSelector.select(volume); if (ep == null) { - s_logger.warn("There is no secondary storage VM for image store " + store.getName()); + logger.warn("There is no secondary storage VM for image store " + store.getName()); return; } DownloadListener dl = new DownloadListener(ep, store, volume, _timer, this, dcmd, callback); @@ -252,7 +250,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - s_logger.warn("Unable to start /resume download of volume " + 
volume.getId() + " to " + store.getName(), e); + logger.warn("Unable to start /resume download of volume " + volume.getId() + " to " + store.getName(), e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index bc661305d2a..87346d9e69b 100644 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -26,7 +26,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; @@ -132,7 +131,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Component @Local(value = {SnapshotManager.class, SnapshotApiService.class}) public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotApiService { - private static final Logger s_logger = Logger.getLogger(SnapshotManagerImpl.class); @Inject private VMTemplateDao _templateDao; @Inject @@ -209,25 +207,25 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, if (result.second().getResult()) { return result.second(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("The result for " + cmd.getClass().getName() + " is " + result.second().getDetails() + " through " + result.first()); + if (logger.isDebugEnabled()) { + logger.debug("The result for " + cmd.getClass().getName() + " is " + result.second().getDetails() + " through " + result.first()); } hostIdsToAvoid.add(result.first()); } catch (StorageUnavailableException e1) { - s_logger.warn("Storage unavailable ", e1); + logger.warn("Storage unavailable ", e1); return null; } try { Thread.sleep(_pauseInterval * 1000); } catch (InterruptedException e) { - s_logger.debug("[ignored] 
interupted while retry cmd."); + logger.debug("[ignored] interupted while retry cmd."); } - s_logger.debug("Retrying..."); + logger.debug("Retrying..."); } - s_logger.warn("After " + _totalRetries + " retries, the command " + cmd.getClass().getName() + " did not succeed."); + logger.warn("After " + _totalRetries + " retries, the command " + cmd.getClass().getName() + " did not succeed."); return null; } @@ -267,7 +265,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.REVERT); if (snapshotStrategy == null) { - s_logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshotId + "'"); + logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshotId + "'"); return false; } @@ -318,7 +316,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Primary); if(snapshot == null) { - s_logger.debug("Failed to create snapshot"); + logger.debug("Failed to create snapshot"); throw new CloudRuntimeException("Failed to create snapshot"); } try { @@ -332,7 +330,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); } catch (Exception e) { - s_logger.debug("Failed to create snapshot", e); + logger.debug("Failed to create snapshot", e); throw new CloudRuntimeException("Failed to create snapshot", e); } @@ -399,7 +397,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotVO oldestSnapshot = snaps.get(0); long oldSnapId = oldestSnapshot.getId(); if (policy != null) { - s_logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". 
Deleting oldest snapshot: " + oldSnapId); + logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". Deleting oldest snapshot: " + oldSnapId); } if (deleteSnapshot(oldSnapId)) { //log Snapshot delete event @@ -428,7 +426,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshotCheck, SnapshotOperation.DELETE); if (snapshotStrategy == null) { - s_logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshotId + "'"); + logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshotId + "'"); return false; } @@ -457,7 +455,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return result; } catch (Exception e) { - s_logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); + logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); throw new CloudRuntimeException("Failed to delete snapshot:" + e.toString()); } @@ -614,18 +612,18 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); } if ((answer != null) && answer.getResult()) { - s_logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId); + logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId); } else { success = false; if (answer != null) { - s_logger.warn("Failed to delete all snapshot for volume " + volumeId + " on secondary storage " + ssHost.getUri()); - s_logger.error(answer.getDetails()); + logger.warn("Failed to delete all snapshot for 
volume " + volumeId + " on secondary storage " + ssHost.getUri()); + logger.error(answer.getDetails()); } } } @@ -635,7 +633,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, for (SnapshotVO snapshot : snapshots) { SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.DELETE); if (snapshotStrategy == null) { - s_logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshot.getId() + "'"); + logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshot.getId() + "'"); continue; } SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Image); @@ -703,7 +701,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, TimeZone timeZone = TimeZone.getTimeZone(cmd.getTimezone()); String timezoneId = timeZone.getID(); if (!timezoneId.equals(cmd.getTimezone())) { - s_logger.warn("Using timezone: " + timezoneId + " for running this snapshot policy as an equivalent of " + cmd.getTimezone()); + logger.warn("Using timezone: " + timezoneId + " for running this snapshot policy as an equivalent of " + cmd.getTimezone()); } try { DateUtil.getNextRunTime(intvType, cmd.getSchedule(), timezoneId, null); @@ -895,7 +893,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, if (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Destroyed) { boolean snapshotEnabled = Boolean.parseBoolean(_configDao.getValue("kvm.snapshot.enabled")); if (!snapshotEnabled) { - s_logger.debug("Snapshot is not supported on host " + host + " for the volume " + volume + " attached to the vm " + vm); + logger.debug("Snapshot is not supported on host " + host + " for the volume " + volume + " attached to the vm " + vm); return false; } } @@ -998,10 +996,10 @@ public class SnapshotManagerImpl extends ManagerBase implements 
SnapshotManager, // Correct the resource count of snapshot in case of delta snapshots. _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize() - snapshotStoreRef.getPhysicalSize())); } catch (Exception e) { - s_logger.debug("post process snapshot failed", e); + logger.debug("post process snapshot failed", e); } } catch (Exception e) { - s_logger.debug("Failed to create snapshot", e); + logger.debug("Failed to create snapshot", e); _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize())); throw new CloudRuntimeException("Failed to create snapshot", e); @@ -1021,7 +1019,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, _totalRetries = NumbersUtil.parseInt(_configDao.getValue("total.retries"), 4); _pauseInterval = 2 * NumbersUtil.parseInt(_configDao.getValue("ping.interval"), 60); - s_logger.info("Snapshot Manager is configured."); + logger.info("Snapshot Manager is configured."); return true; } @@ -1076,7 +1074,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, for (Long pId : policyIds) { if (!deletePolicy(userId, pId)) { success = false; - s_logger.warn("Failed to delete snapshot policy with Id: " + policyId); + logger.warn("Failed to delete snapshot policy with Id: " + policyId); return success; } } @@ -1110,7 +1108,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } catch (ResourceAllocationException e) { if (snapshotType != Type.MANUAL) { String msg = "Snapshot resource limit exceeded for account id : " + owner.getId() + ". 
Failed to create recurring snapshots"; - s_logger.warn(msg); + logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Snapshot resource limit exceeded for account id : " + owner.getId() + ". Failed to create recurring snapshots; please use updateResourceLimit to increase the limit"); diff --git a/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java index 4ce2bba498e..93fb4a3fccb 100644 --- a/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java @@ -27,7 +27,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiConstants; @@ -69,7 +68,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {SnapshotScheduler.class}) public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotScheduler { - private static final Logger s_logger = Logger.getLogger(SnapshotSchedulerImpl.class); @Inject protected AsyncJobDao _asyncJobDao; @@ -117,7 +115,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu nextTimestamp = DateUtil.getNextRunTime(type, schedule, timezone, currentTimestamp); final String currentTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, currentTimestamp); final String nextScheduledTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, nextTimestamp); - s_logger.debug("Current time is " + currentTime + ". NextScheduledTime of policyId " + policyId + " is " + nextScheduledTime); + logger.debug("Current time is " + currentTime + ". 
NextScheduledTime of policyId " + policyId + " is " + nextScheduledTime); } return nextTimestamp; } @@ -224,10 +222,10 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu @DB protected void scheduleSnapshots() { String displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, _currentTimestamp); - s_logger.debug("Snapshot scheduler.poll is being called at " + displayTime); + logger.debug("Snapshot scheduler.poll is being called at " + displayTime); final List snapshotsToBeExecuted = _snapshotScheduleDao.getSchedulesToExecute(_currentTimestamp); - s_logger.debug("Got " + snapshotsToBeExecuted.size() + " snapshots to be executed at " + displayTime); + logger.debug("Got " + snapshotsToBeExecuted.size() + " snapshots to be executed at " + displayTime); for (final SnapshotScheduleVO snapshotToBeExecuted : snapshotsToBeExecuted) { SnapshotScheduleVO tmpSnapshotScheduleVO = null; @@ -243,18 +241,18 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu Account volAcct = _acctDao.findById(volume.getAccountId()); if (volAcct == null || volAcct.getState() == Account.State.disabled) { // this account has been removed, so don't trigger recurring snapshot - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skip snapshot for volume " + volume.getUuid() + " since its account has been removed or disabled"); + if (logger.isDebugEnabled()) { + logger.debug("Skip snapshot for volume " + volume.getUuid() + " since its account has been removed or disabled"); } continue; } if (_snapshotPolicyDao.findById(policyId) == null) { _snapshotScheduleDao.remove(snapshotToBeExecuted.getId()); } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { final Date scheduledTimestamp = snapshotToBeExecuted.getScheduledTimestamp(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - s_logger.debug("Scheduling 1 snapshot for volume id " + volumeId + " (volume name:" + + 
logger.debug("Scheduling 1 snapshot for volume id " + volumeId + " (volume name:" + volume.getName() + ") for schedule id: " + snapshotToBeExecuted.getId() + " at " + displayTime); } @@ -287,7 +285,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu _snapshotScheduleDao.update(snapshotScheId, tmpSnapshotScheduleVO); } catch (final Exception e) { // TODO Logging this exception is enough? - s_logger.warn("Scheduling snapshot failed due to " + e.toString()); + logger.warn("Scheduling snapshot failed due to " + e.toString()); } finally { if (tmpSnapshotScheduleVO != null) { _snapshotScheduleDao.releaseFromLockTable(snapshotScheId); @@ -379,7 +377,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu success = _snapshotScheduleDao.remove(schedule.getId()); } if (!success) { - s_logger.debug("Error while deleting Snapshot schedule with Id: " + schedule.getId()); + logger.debug("Error while deleting Snapshot schedule with Id: " + schedule.getId()); } return success; } @@ -402,7 +400,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu } _currentTimestamp = new Date(); - s_logger.info("Snapshot Scheduler is configured."); + logger.info("Snapshot Scheduler is configured."); return true; } @@ -430,7 +428,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu final Date currentTimestamp = new Date(); poll(currentTimestamp); } catch (final Throwable t) { - s_logger.warn("Catch throwable in snapshot scheduler ", t); + logger.warn("Catch throwable in snapshot scheduler ", t); } } }; diff --git a/server/src/com/cloud/storage/upload/UploadMonitorImpl.java b/server/src/com/cloud/storage/upload/UploadMonitorImpl.java index 35cb665e667..3dc47abc14d 100644 --- a/server/src/com/cloud/storage/upload/UploadMonitorImpl.java +++ b/server/src/com/cloud/storage/upload/UploadMonitorImpl.java @@ -32,7 +32,6 @@ import javax.ejb.Local; import javax.inject.Inject; 
import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -84,7 +83,6 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; @Local(value = {UploadMonitor.class}) public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { - static final Logger s_logger = Logger.getLogger(UploadMonitorImpl.class); @Inject private UploadDao _uploadDao; @@ -161,12 +159,12 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { EndPoint ep = _epSelector.select(secStore); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); return; } ep.sendMessageAsync(ucmd, new UploadListener.Callback(ep.getId(), ul)); } catch (Exception e) { - s_logger.warn("Unable to start upload of volume " + volume.getName() + " from " + secStore.getName() + " to " + url, e); + logger.warn("Unable to start upload of volume " + volume.getName() + " from " + secStore.getName() + " to " + url, e); ul.setDisconnected(); ul.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -194,12 +192,12 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { EndPoint ep = _epSelector.select(secStore); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); return null; } ep.sendMessageAsync(ucmd, new UploadListener.Callback(ep.getId(), ul)); } catch (Exception e) { - s_logger.warn("Unable to start upload of " + template.getUniqueName() + " from " + secStore.getName() + " to " + url, e); + logger.warn("Unable to start upload of " + template.getUniqueName() + " from " + secStore.getName() + " to " + url, e); ul.setDisconnected(); ul.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -220,7 +218,7 @@ public class UploadMonitorImpl 
extends ManagerBase implements UploadMonitor { EndPoint ep = _epSelector.select(store); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); return null; } @@ -263,7 +261,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { Answer ans = ep.sendMessage(cmd); if (ans == null || !ans.getResult()) { errorString = "Unable to create a link for " + type + " id:" + template.getId() + "," + (ans == null ? "" : ans.getDetails()); - s_logger.error(errorString); + logger.error(errorString); throw new CloudRuntimeException(errorString); } @@ -319,7 +317,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { Answer ans = ep.sendMessage(cmd); if (ans == null || !ans.getResult()) { errorString = "Unable to create a link for " + type + " id:" + entityId + "," + (ans == null ? "" : ans.getDetails()); - s_logger.warn(errorString); + logger.warn(errorString); throw new CloudRuntimeException(errorString); } @@ -328,7 +326,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { SecondaryStorageVmVO ssVm = ssVms.get(0); if (ssVm.getPublicIpAddress() == null) { errorString = "A running secondary storage vm has a null public ip?"; - s_logger.error(errorString); + logger.error(errorString); throw new CloudRuntimeException(errorString); } //Construct actual URL locally now that the symlink exists at SSVM @@ -378,7 +376,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { String cert = configs.get("secstorage.secure.copy.cert"); if ("realhostip.com".equalsIgnoreCase(cert)) { - s_logger.warn("Only realhostip.com ssl cert is supported, ignoring self-signed and other certs"); + logger.warn("Only realhostip.com ssl cert is supported, ignoring self-signed and other certs"); } _ssvmUrlDomain = configs.get("secstorage.ssl.cert.domain"); @@ -425,10 +423,10 @@ public class 
UploadMonitorImpl extends ManagerBase implements UploadMonitor { HostVO storageHost = _serverDao.findById(sserverId); if (storageHost == null) { - s_logger.warn("Huh? Agent id " + sserverId + " does not correspond to a row in hosts table?"); + logger.warn("Huh? Agent id " + sserverId + " does not correspond to a row in hosts table?"); return; } - s_logger.debug("Handling upload sserverId " + sserverId); + logger.debug("Handling upload sserverId " + sserverId); List uploadsInProgress = new ArrayList(); uploadsInProgress.addAll(_uploadDao.listByHostAndUploadStatus(sserverId, UploadVO.Status.UPLOAD_IN_PROGRESS)); uploadsInProgress.addAll(_uploadDao.listByHostAndUploadStatus(sserverId, UploadVO.Status.COPY_IN_PROGRESS)); @@ -466,7 +464,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { } } catch (Exception e) { - s_logger.error("Caught the following Exception", e); + logger.error("Caught the following Exception", e); } } } @@ -494,17 +492,17 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { new DeleteEntityDownloadURLCommand(path, extractJob.getType(), extractJob.getUploadUrl(), ((ImageStoreVO)secStore).getParent()); EndPoint ep = _epSelector.select(secStore); if (ep == null) { - s_logger.warn("UploadMonitor cleanup: There is no secondary storage VM for secondary storage host " + extractJob.getDataStoreId()); + logger.warn("UploadMonitor cleanup: There is no secondary storage VM for secondary storage host " + extractJob.getDataStoreId()); continue; //TODO: why continue? why not break? 
} - if (s_logger.isDebugEnabled()) { - s_logger.debug("UploadMonitor cleanup: Sending deletion of extract URL " + extractJob.getUploadUrl() + " to ssvm " + ep.getHostAddr()); + if (logger.isDebugEnabled()) { + logger.debug("UploadMonitor cleanup: Sending deletion of extract URL " + extractJob.getUploadUrl() + " to ssvm " + ep.getHostAddr()); } Answer ans = ep.sendMessage(cmd); if (ans != null && ans.getResult()) { _uploadDao.remove(extractJob.getId()); } else { - s_logger.warn("UploadMonitor cleanup: Unable to delete the link for " + extractJob.getType() + " id=" + extractJob.getTypeId() + " url=" + + logger.warn("UploadMonitor cleanup: Unable to delete the link for " + extractJob.getType() + " id=" + extractJob.getTypeId() + " url=" + extractJob.getUploadUrl() + " on ssvm " + ep.getHostAddr()); } } diff --git a/server/src/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/com/cloud/tags/TaggedResourceManagerImpl.java index dfe3ae0405f..34eedebe49b 100644 --- a/server/src/com/cloud/tags/TaggedResourceManagerImpl.java +++ b/server/src/com/cloud/tags/TaggedResourceManagerImpl.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import com.cloud.api.query.dao.ResourceTagJoinDao; import com.cloud.dc.DataCenterVO; @@ -91,7 +90,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO; @Local(value = {TaggedResourceService.class}) public class TaggedResourceManagerImpl extends ManagerBase implements TaggedResourceService { - public static final Logger s_logger = Logger.getLogger(TaggedResourceManagerImpl.class); private static final Map> s_typeMap = new HashMap>(); static { @@ -354,7 +352,7 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso public void doInTransactionWithoutResult(TransactionStatus status) { for (ResourceTag 
tagToRemove : tagsToRemove) { _resourceTagDao.remove(tagToRemove.getId()); - s_logger.debug("Removed the tag " + tagToRemove); + logger.debug("Removed the tag " + tagToRemove); } } }); diff --git a/server/src/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java index 0b7854e4bfb..09aa75b3b58 100644 --- a/server/src/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplat import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; -import org.apache.log4j.Logger; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; @@ -88,7 +87,6 @@ import com.cloud.utils.exception.CloudRuntimeException; @Local(value = TemplateAdapter.class) public class HypervisorTemplateAdapter extends TemplateAdapterBase { - private final static Logger s_logger = Logger.getLogger(HypervisorTemplateAdapter.class); @Inject DownloadMonitor _downloadMonitor; @Inject @@ -175,19 +173,19 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { if (zoneId != null) { DataCenterVO zone = _dcDao.findById(zoneId); if (zone == null) { - s_logger.warn("Unable to find zone by id " + zoneId + ", so skip downloading template to its image store " + imageStore.getId()); + logger.warn("Unable to find zone by id " + zoneId + ", so skip downloading template to its image store " + imageStore.getId()); continue; } // Check if zone is disabled if (Grouping.AllocationState.Disabled == zone.getAllocationState()) { - s_logger.info("Zone " + zoneId + " is disabled, so skip downloading template 
to its image store " + imageStore.getId()); + logger.info("Zone " + zoneId + " is disabled, so skip downloading template to its image store " + imageStore.getId()); continue; } // Check if image store has enough capacity for template if (!_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { - s_logger.info("Image store doesn't has enough capacity, so skip downloading template to this image store " + imageStore.getId()); + logger.info("Image store doesn't has enough capacity, so skip downloading template to this image store " + imageStore.getId()); continue; } // We want to download private template to one of the image store in a zone @@ -239,13 +237,13 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { if (zoneId != null) { DataCenterVO zone = _dcDao.findById(zoneId); if (zone == null) { - s_logger.warn("Unable to find zone by id " + zoneId + ", so skip downloading template to its image store " + imageStore.getId()); + logger.warn("Unable to find zone by id " + zoneId + ", so skip downloading template to its image store " + imageStore.getId()); continue; } // Check if zone is disabled if (Grouping.AllocationState.Disabled == zone.getAllocationState()) { - s_logger.info("Zone " + zoneId + " is disabled, so skip downloading template to its image store " + imageStore.getId()); + logger.info("Zone " + zoneId + " is disabled, so skip downloading template to its image store " + imageStore.getId()); continue; } @@ -268,7 +266,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { EndPoint ep = _epSelector.select(templateOnStore); if (ep == null) { String errMsg = "There is no secondary storage VM for downloading template to image store " + imageStore.getName(); - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -335,7 +333,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { if (tmpltStore != null) { physicalSize = tmpltStore.getPhysicalSize(); } else { - 
s_logger.warn("No entry found in template_store_ref for template id: " + template.getId() + " and image store id: " + ds.getId() + + logger.warn("No entry found in template_store_ref for template id: " + template.getId() + " and image store id: " + ds.getId() + " at the end of registering template!"); } Scope dsScope = ds.getScope(); @@ -344,7 +342,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { UsageEventUtils.publishUsageEvent(etype, template.getAccountId(), dsScope.getScopeId(), template.getId(), template.getName(), null, null, physicalSize, template.getSize(), VirtualMachineTemplate.class.getName(), template.getUuid()); } else { - s_logger.warn("Zone scope image store " + ds.getId() + " has a null scope id"); + logger.warn("Zone scope image store " + ds.getId() + " has a null scope id"); } } else if (dsScope.getScopeType() == ScopeType.REGION) { // publish usage event for region-wide image store using a -1 zoneId for 4.2, need to revisit post-4.2 @@ -369,7 +367,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { List imageStores = templateMgr.getImageStoreByTemplate(template.getId(), profile.getZoneId()); if (imageStores == null || imageStores.size() == 0) { // already destroyed on image stores - s_logger.info("Unable to find image store still having template: " + template.getName() + ", so just mark the template removed"); + logger.info("Unable to find image store still having template: " + template.getName() + ", so just mark the template removed"); } else { // Make sure the template is downloaded to all found image stores for (DataStore store : imageStores) { @@ -378,7 +376,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { for (TemplateDataStoreVO templateStore : templateStores) { if (templateStore.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) { String errorMsg = "Please specify a template that is not currently being downloaded."; - s_logger.debug("Template: " + template.getName() + 
" is currently being downloaded to secondary storage host: " + store.getName() + + logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + "; cant' delete it."); throw new CloudRuntimeException(errorMsg); } @@ -399,13 +397,13 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(), null, null, null); } - s_logger.info("Delete template from image store: " + imageStore.getName()); + logger.info("Delete template from image store: " + imageStore.getName()); AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore)); try { TemplateApiResult result = future.get(); success = result.isSuccess(); if (!success) { - s_logger.warn("Failed to delete the template " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult()); + logger.warn("Failed to delete the template " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult()); break; } @@ -420,10 +418,10 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { templateDataStoreDao.removeByTemplateStore(template.getId(), imageStore.getId()); } catch (InterruptedException e) { - s_logger.debug("delete template Failed", e); + logger.debug("delete template Failed", e); throw new CloudRuntimeException("delete template Failed", e); } catch (ExecutionException e) { - s_logger.debug("delete template Failed", e); + logger.debug("delete template Failed", e); throw new CloudRuntimeException("delete template Failed", e); } } @@ -437,7 +435,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { // delete all cache entries for this template List cacheTmpls = imageFactory.listTemplateOnCache(template.getId()); for (TemplateInfo tmplOnCache : cacheTmpls) { - s_logger.info("Delete template from 
image cache store: " + tmplOnCache.getDataStore().getName()); + logger.info("Delete template from image cache store: " + tmplOnCache.getDataStore().getName()); tmplOnCache.delete(); } diff --git a/server/src/com/cloud/template/TemplateAdapterBase.java b/server/src/com/cloud/template/TemplateAdapterBase.java index c5d0c5b261f..b5e62106413 100644 --- a/server/src/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/com/cloud/template/TemplateAdapterBase.java @@ -23,7 +23,6 @@ import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; @@ -74,7 +73,6 @@ import com.cloud.vm.UserVmVO; import com.cloud.vm.dao.UserVmDao; public abstract class TemplateAdapterBase extends AdapterBase implements TemplateAdapter { - private final static Logger s_logger = Logger.getLogger(TemplateAdapterBase.class); protected @Inject DomainDao _domainDao; protected @Inject @@ -202,7 +200,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat try { imgfmt = ImageFormat.valueOf(format.toUpperCase()); } catch (IllegalArgumentException e) { - s_logger.debug("ImageFormat IllegalArgumentException: " + e.getMessage()); + logger.debug("ImageFormat IllegalArgumentException: " + e.getMessage()); throw new IllegalArgumentException("Image format: " + format + " is incorrect. 
Supported formats are " + EnumUtils.listValues(ImageFormat.values())); } diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index 391134099e0..8a75c98c973 100644 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -43,7 +43,6 @@ import org.apache.cloudstack.api.response.GetUploadParamsResponse; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; import org.apache.cloudstack.utils.imagestore.ImageStoreUtil; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd; @@ -196,7 +195,6 @@ import org.joda.time.DateTimeZone; @Local(value = {TemplateManager.class, TemplateApiService.class}) public class TemplateManagerImpl extends ManagerBase implements TemplateManager, TemplateApiService, Configurable { - private final static Logger s_logger = Logger.getLogger(TemplateManagerImpl.class); @Inject private VMTemplateDao _tmpltDao; @@ -451,7 +449,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (pool.getStatus() == StoragePoolStatus.Up && pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(vmTemplate, pool); } else { - s_logger.warn("Skip loading template " + vmTemplate.getId() + " into primary storage " + pool.getId() + " as either the pool zone " + logger.warn("Skip loading template " + vmTemplate.getId() + " into primary storage " + pool.getId() + " as either the pool zone " + pool.getDataCenterId() + " is different from the requested zone " + zoneId + " or the pool is currently not available."); } } @@ -543,7 +541,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (vm.getIsoId() != null) { TemplateInfo 
template = prepareIso(vm.getIsoId(), vm.getDataCenterId()); if (template == null){ - s_logger.error("Failed to prepare ISO on secondary or cache storage"); + logger.error("Failed to prepare ISO on secondary or cache storage"); throw new CloudRuntimeException("Failed to prepare ISO on secondary or cache storage"); } if (template.isBootable()) { @@ -569,22 +567,22 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } private void prepareTemplateInOneStoragePool(final VMTemplateVO template, final StoragePoolVO pool) { - s_logger.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); _preloadExecutor.execute(new ManagedContextRunnable() { @Override protected void runInContext() { try { reallyRun(); } catch (Throwable e) { - s_logger.warn("Unexpected exception ", e); + logger.warn("Unexpected exception ", e); } } private void reallyRun() { - s_logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); StoragePool pol = (StoragePool)_dataStoreMgr.getPrimaryDataStore(pool.getId()); prepareTemplateForCreate(template, pol); - s_logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); } }); } @@ -595,7 +593,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(template, pool); } else { - s_logger.info("Skip loading template " + template.getId() + " into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + + logger.info("Skip loading template " + template.getId() + 
" into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + " is different from the requested zone " + zoneId); } } @@ -617,8 +615,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, _tmpltPoolDao.update(templateStoragePoolRef.getId(), templateStoragePoolRef); if (templateStoragePoolRef.getDownloadState() == Status.DOWNLOADED) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template " + templateId + " has already been downloaded to pool " + poolId); + if (logger.isDebugEnabled()) { + logger.debug("Template " + templateId + " has already been downloaded to pool " + poolId); } return templateStoragePoolRef; @@ -627,7 +625,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, templateStoreRef = _tmplStoreDao.findByTemplateZoneDownloadStatus(templateId, pool.getDataCenterId(), VMTemplateStorageResourceAssoc.Status.DOWNLOADED); if (templateStoreRef == null) { - s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); + logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; } @@ -637,8 +635,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } if (templateStoragePoolRef == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Downloading template " + templateId + " to pool " + poolId); + if (logger.isDebugEnabled()) { + logger.debug("Downloading template " + templateId + " to pool " + poolId); } DataStore srcSecStore = _dataStoreMgr.getDataStore(templateStoreRef.getDataStoreId(), DataStoreRole.Image); TemplateInfo srcTemplate = _tmplFactory.getTemplate(templateId, srcSecStore); @@ -647,13 +645,13 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, try { TemplateApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("prepare template failed:" + result.getResult()); + 
logger.debug("prepare template failed:" + result.getResult()); return null; } return _tmpltPoolDao.findByPoolTemplate(poolId, templateId); } catch (Exception ex) { - s_logger.debug("failed to copy template from image store:" + srcSecStore.getName() + " to primary storage"); + logger.debug("failed to copy template from image store:" + srcSecStore.getName() + " to primary storage"); } } @@ -667,7 +665,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -686,7 +684,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, VMTemplateStoragePoolVO templateStoragePoolRef = _tmpltPoolDao.acquireInLockTable(templateStoragePoolRefId, 1200); if (templateStoragePoolRef == null) { - s_logger.warn("resetTemplateDownloadStateOnPool failed - unable to lock TemplateStorgePoolRef " + templateStoragePoolRefId); + logger.warn("resetTemplateDownloadStateOnPool failed - unable to lock TemplateStorgePoolRef " + templateStoragePoolRefId); return false; } @@ -742,7 +740,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, try { TemplateApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("copy template failed for image store " + dstSecStore.getName() + ":" + result.getResult()); + logger.debug("copy template failed for image store " + dstSecStore.getName() + ":" + result.getResult()); continue; // try next image store } @@ -754,7 +752,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } return true; } catch (Exception ex) { - s_logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one"); + logger.debug("failed to copy template to image 
store:" + dstSecStore.getName() + " ,will try next one"); } } return false; @@ -792,7 +790,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (template.isCrossZones()) { // sync template from cache store to region store if it is not there, for cases where we are going to migrate existing NFS to S3. _tmpltSvr.syncTemplateToRegionStore(templateId, srcSecStore); - s_logger.debug("Template " + templateId + " is cross-zone, don't need to copy"); + logger.debug("Template " + templateId + " is cross-zone, don't need to copy"); return template; } @@ -814,7 +812,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, DataStore dstSecStore = getImageStore(destZoneId, templateId); if (dstSecStore != null) { - s_logger.debug("There is template " + templateId + " in secondary storage " + dstSecStore.getName() + " in zone " + destZoneId + " , don't need to copy"); + logger.debug("There is template " + templateId + " in secondary storage " + dstSecStore.getName() + " in zone " + destZoneId + " , don't need to copy"); return template; } @@ -879,7 +877,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, //Assumption here is that, we will hold the same lock during create volume from template VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolVO.getId()); if (templatePoolRef == null) { - s_logger.debug("can't aquire the lock for template pool ref:" + templatePoolVO.getId()); + logger.debug("can't acquire the lock for template pool ref:" + templatePoolVO.getId()); return; } @@ -887,8 +885,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, StoragePool pool = (StoragePool)_dataStoreMgr.getPrimaryDataStore(templatePoolVO.getPoolId()); VMTemplateVO template = _tmpltDao.findByIdIncludingRemoved(templatePoolVO.getTemplateId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Evicting " + templatePoolVO); + if
(logger.isDebugEnabled()) { + logger.debug("Evicting " + templatePoolVO); } DestroyCommand cmd = new DestroyCommand(pool, templatePoolVO); @@ -898,13 +896,13 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (answer != null && answer.getResult()) { // Remove the templatePoolVO if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - s_logger.debug("Successfully evicted template: " + template.getName() + " from storage pool: " + pool.getName()); + logger.debug("Successfully evicted template: " + template.getName() + " from storage pool: " + pool.getName()); } } else { - s_logger.info("Will retry evicte template: " + template.getName() + " from storage pool: " + pool.getName()); + logger.info("Will retry evict template: " + template.getName() + " from storage pool: " + pool.getName()); } } catch (StorageUnavailableException e) { - s_logger.info("Storage is unavailable currently. Will retry evicte template: " + template.getName() + " from storage pool: " + pool.getName()); + logger.info("Storage is unavailable currently. Will retry evict template: " + template.getName() + " from storage pool: " + pool.getName()); } } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRef.getId()); @@ -949,14 +947,14 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, List nonExpungedVms = _vmInstanceDao.listNonExpungedByZoneAndTemplate(zoneId, templateId); if (!nonExpungedVms.isEmpty()) { - s_logger.debug("Template " + template.getName() + " in zone " + zone.getName() + + logger.debug("Template " + template.getName() + " in zone " + zone.getName() + " is not deleteable because there are non-expunged VMs deployed from this template."); return false; } List userVmUsingIso = _userVmDao.listByIsoId(templateId); // check if there is any VM using this ISO.
if (!userVmUsingIso.isEmpty()) { - s_logger.debug("ISO " + template.getName() + " in zone " + zone.getName() + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); + logger.debug("ISO " + template.getName() + " in zone " + zone.getName() + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); return false; } // Check if there are any snapshots for the template in the template @@ -965,7 +963,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, for (VolumeVO volume : volumes) { List snapshots = _snapshotDao.listByVolumeIdVersion(volume.getId(), "2.1"); if (!snapshots.isEmpty()) { - s_logger.debug("Template " + template.getName() + " in zone " + zone.getName() + + logger.debug("Template " + template.getName() + " in zone " + zone.getName() + " is not deleteable because there are 2.1 snapshots using this template."); return false; } @@ -983,7 +981,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // always be copied to // primary storage before deploying VM. 
if (!userVmUsingIso.isEmpty()) { - s_logger.debug("ISO " + templateId + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); + logger.debug("ISO " + templateId + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); return false; } @@ -1080,7 +1078,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, public TemplateInfo prepareIso(long isoId, long dcId) { TemplateInfo tmplt = _tmplFactory.getTemplate(isoId, DataStoreRole.Image, dcId); if (tmplt == null || tmplt.getFormat() != ImageFormat.ISO) { - s_logger.warn("ISO: " + isoId + " does not exist in vm_template table"); + logger.warn("ISO: " + isoId + " does not exist in vm_template table"); return null; } @@ -1089,7 +1087,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, Scope destScope = new ZoneScope(dcId); TemplateInfo cacheData = (TemplateInfo)cacheMgr.createCacheObject(tmplt, destScope); if (cacheData == null) { - s_logger.error("Failed in copy iso from S3 to cache storage"); + logger.error("Failed in copy iso from S3 to cache storage"); return null; } return cacheData; @@ -1114,7 +1112,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, HostVO host = _hostDao.findById(vm.getHostId()); if (host == null) { - s_logger.warn("Host: " + vm.getHostId() + " does not exist"); + logger.warn("Host: " + vm.getHostId() + " does not exist"); return false; } @@ -1308,7 +1306,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // If the template is removed throw an error. 
if (template.getRemoved() != null) { - s_logger.error("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); + logger.error("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); throw new InvalidParameterValueException("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); } @@ -1494,7 +1492,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, result = future.get(); if (result.isFailed()) { privateTemplate = null; - s_logger.debug("Failed to create template" + result.getResult()); + logger.debug("Failed to create template" + result.getResult()); throw new CloudRuntimeException("Failed to create template" + result.getResult()); } @@ -1514,10 +1512,10 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, privateTemplate.getSourceTemplateId(), srcTmpltStore.getPhysicalSize(), privateTemplate.getSize()); _usageEventDao.persist(usageEvent); } catch (InterruptedException e) { - s_logger.debug("Failed to create template", e); + logger.debug("Failed to create template", e); throw new CloudRuntimeException("Failed to create template", e); } catch (ExecutionException e) { - s_logger.debug("Failed to create template", e); + logger.debug("Failed to create template", e); throw new CloudRuntimeException("Failed to create template", e); } @@ -1629,8 +1627,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // created if (!_volumeMgr.volumeInactive(volume)) { String msg = "Unable to create private template for volume: " + volume.getName() + "; volume is attached to a non-stopped VM, please stop the VM first"; - if (s_logger.isInfoEnabled()) { - s_logger.info(msg); + if (logger.isInfoEnabled()) { + logger.info(msg); } throw new CloudRuntimeException(msg); } @@ -1694,8 +1692,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } String 
templateTag = cmd.getTemplateTag(); if (templateTag != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding template tag: " + templateTag); + if (logger.isDebugEnabled()) { + logger.debug("Adding template tag: " + templateTag); } } privateTemplate = new VMTemplateVO(nextTemplateId, name, ImageFormat.RAW, isPublic, featured, isExtractable, @@ -1703,8 +1701,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, passwordEnabledValue, guestOS.getId(), true, hyperType, templateTag, cmd.getDetails(), false, isDynamicScalingEnabled); if (sourceTemplateId != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("This template is getting created from other template, setting source template Id to: " + sourceTemplateId); + if (logger.isDebugEnabled()) { + logger.debug("This template is getting created from other template, setting source template Id to: " + sourceTemplateId); } } privateTemplate.setSourceTemplateId(sourceTemplateId); diff --git a/server/src/com/cloud/usage/UsageServiceImpl.java b/server/src/com/cloud/usage/UsageServiceImpl.java index df43a009dbb..54fb7d7abf3 100644 --- a/server/src/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/com/cloud/usage/UsageServiceImpl.java @@ -64,7 +64,6 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.usage.Usage; import org.apache.cloudstack.usage.UsageService; import org.apache.cloudstack.usage.UsageTypes; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import javax.ejb.Local; @@ -80,7 +79,6 @@ import java.util.TimeZone; @Component @Local(value = {UsageService.class}) public class UsageServiceImpl extends ManagerBase implements UsageService, Manager { - public static final Logger s_logger = Logger.getLogger(UsageServiceImpl.class); //ToDo: Move implementation to ManagaerImpl @@ -213,7 +211,7 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag } else if 
(_accountService.isDomainAdmin(caller.getId())) { isDomainAdmin = true; } - s_logger.debug("Account details not available. Using userContext accountId: " + accountId); + logger.debug("Account details not available. Using userContext accountId: " + accountId); } Date startDate = cmd.getStartDate(); @@ -225,8 +223,8 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag Date adjustedStartDate = computeAdjustedTime(startDate, usageTZ); Date adjustedEndDate = computeAdjustedTime(endDate, usageTZ); - if (s_logger.isDebugEnabled()) { - s_logger.debug("getting usage records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + " and " + adjustedEndDate + + if (logger.isDebugEnabled()) { + logger.debug("getting usage records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + " and " + adjustedEndDate + ", using pageSize: " + cmd.getPageSizeVal() + " and startIndex: " + cmd.getStartIndex()); } @@ -394,7 +392,7 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag cal.set(Calendar.SECOND, 0); cal.set(Calendar.MILLISECOND, 0); long execTS = cal.getTimeInMillis(); - s_logger.debug("Trying to remove old raw cloud_usage records older than " + interval + " day(s), current time=" + curTS + " next job execution time=" + execTS); + logger.debug("Trying to remove old raw cloud_usage records older than " + interval + " day(s), current time=" + curTS + " next job execution time=" + execTS); // Let's avoid cleanup when job runs and around a 15 min interval if (Math.abs(curTS - execTS) < 15 * 60 * 1000) { return false; diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index 2d07e841601..c6821a2f041 100644 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -41,7 +41,6 @@ import javax.naming.ConfigurationException; import 
org.apache.commons.codec.binary.Base64; import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.QuerySelector; @@ -171,7 +170,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Local(value = {AccountManager.class, AccountService.class}) public class AccountManagerImpl extends ManagerBase implements AccountManager, Manager { - public static final Logger s_logger = Logger.getLogger(AccountManagerImpl.class); @Inject private AccountDao _accountDao; @@ -397,8 +395,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M for (SecurityChecker checker : _securityCheckers) { try { if (checker.checkAccess(acct, null, null, "SystemCapability")) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Root Access granted to " + acct + " by " + checker.getName()); + if (logger.isTraceEnabled()) { + logger.trace("Root Access granted to " + acct + " by " + checker.getName()); } return true; } @@ -420,8 +418,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M for (SecurityChecker checker : _securityCheckers) { try { if (checker.checkAccess(acct, null, null, "DomainCapability")) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("DomainAdmin Access granted to " + acct + " by " + checker.getName()); + if (logger.isTraceEnabled()) { + logger.trace("DomainAdmin Access granted to " + acct + " by " + checker.getName()); } return true; } @@ -451,8 +449,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M for (SecurityChecker checker : _securityCheckers) { try { if (checker.checkAccess(acct, null, null, "DomainResourceCapability")) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ResourceDomainAdmin Access granted to " + acct + " by " + checker.getName()); + if (logger.isTraceEnabled()) { + logger.trace("ResourceDomainAdmin Access granted to " + acct + " by " + checker.getName()); 
} return true; } @@ -479,8 +477,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M public void checkAccess(Account caller, Domain domain) throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(caller, domain)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + caller + " to " + domain + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + caller + " to " + domain + " by " + checker.getName()); } return; } @@ -517,8 +515,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (caller.getId() == Account.ACCOUNT_ID_SYSTEM || isRootAdmin(caller.getId())) { // no need to make permission checks if the system/root admin makes the call - if (s_logger.isTraceEnabled()) { - s_logger.trace("No need to make permission check for System/RootAdmin account, returning true"); + if (logger.isTraceEnabled()) { + logger.trace("No need to make permission check for System/RootAdmin account, returning true"); } return; } @@ -545,8 +543,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M boolean granted = false; for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(caller, entity, accessType, apiName)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access to " + entity + " granted to " + caller + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access to " + entity + " granted to " + caller + " by " + checker.getName()); } granted = true; break; @@ -621,7 +619,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } }); } catch (Exception e) { - s_logger.error("Failed to update login attempts for user with id " + id); + logger.error("Failed to update login attempts for user with id " + id); } } @@ -652,12 +650,12 @@ public class AccountManagerImpl extends ManagerBase 
implements AccountManager, M acctForUpdate.setState(State.locked); success = _accountDao.update(Long.valueOf(accountId), acctForUpdate); } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Attempting to lock a non-enabled account, current state is " + account.getState() + " (accountId: " + accountId + "), locking failed."); + if (logger.isInfoEnabled()) { + logger.info("Attempting to lock a non-enabled account, current state is " + account.getState() + " (accountId: " + accountId + "), locking failed."); } } } else { - s_logger.warn("Failed to lock account " + accountId + ", account not found."); + logger.warn("Failed to lock account " + accountId + ", account not found."); } return success; } @@ -668,12 +666,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // delete the account record if (!_accountDao.remove(accountId)) { - s_logger.error("Unable to delete account " + accountId); + logger.error("Unable to delete account " + accountId); return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removed account " + accountId); + if (logger.isDebugEnabled()) { + logger.debug("Removed account " + accountId); } return cleanupAccount(account, callerUserId, caller); @@ -688,7 +686,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M List users = _userDao.listByAccount(accountId); for (UserVO user : users) { if (!_userDao.remove(user.getId())) { - s_logger.error("Unable to delete user: " + user + " as a part of account " + account + " cleanup"); + logger.error("Unable to delete user: " + user + " as a part of account " + account + " cleanup"); accountCleanupNeeded = true; } } @@ -711,7 +709,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M List groups = _vmGroupDao.listByAccountId(accountId); for (InstanceGroupVO group : groups) { if (!_vmMgr.deleteVmGroup(group.getId())) { - s_logger.error("Unable to delete group: " + group.getId()); + 
logger.error("Unable to delete group: " + group.getId()); accountCleanupNeeded = true; } } @@ -719,7 +717,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // Delete the snapshots dir for the account. Have to do this before destroying the VMs. boolean success = _snapMgr.deleteSnapshotDirsForAccount(accountId); if (success) { - s_logger.debug("Successfully deleted snapshots directories for all volumes under account " + accountId + " across all zones"); + logger.debug("Successfully deleted snapshots directories for all volumes under account " + accountId + " across all zones"); } // clean up templates @@ -730,14 +728,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { allTemplatesDeleted = _tmpltMgr.delete(callerUserId, template.getId(), null); } catch (Exception e) { - s_logger.warn("Failed to delete template while removing account: " + template.getName() + " due to: ", e); + logger.warn("Failed to delete template while removing account: " + template.getName() + " due to: ", e); allTemplatesDeleted = false; } } } if (!allTemplatesDeleted) { - s_logger.warn("Failed to delete templates while removing account id=" + accountId); + logger.warn("Failed to delete templates while removing account id=" + accountId); accountCleanupNeeded = true; } @@ -747,20 +745,20 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { _vmSnapshotMgr.deleteVMSnapshot(vmSnapshot.getId()); } catch (Exception e) { - s_logger.debug("Failed to cleanup vm snapshot " + vmSnapshot.getId() + " due to " + e.toString()); + logger.debug("Failed to cleanup vm snapshot " + vmSnapshot.getId() + " due to " + e.toString()); } } // Destroy the account's VMs List vms = _userVmDao.listByAccountId(accountId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expunging # of vms (accountId=" + accountId + "): " + vms.size()); + if (logger.isDebugEnabled()) { + logger.debug("Expunging # of vms 
(accountId=" + accountId + "): " + vms.size()); } // no need to catch exception at this place as expunging vm should pass in order to perform further cleanup for (UserVmVO vm : vms) { if (!_vmMgr.expunge(vm, callerUserId, caller)) { - s_logger.error("Unable to expunge vm: " + vm.getId()); + logger.error("Unable to expunge vm: " + vm.getId()); accountCleanupNeeded = true; } } @@ -772,7 +770,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { volumeService.deleteVolume(volume.getId(), caller); } catch (Exception ex) { - s_logger.warn("Failed to cleanup volumes as a part of account id=" + accountId + " cleanup due to Exception: ", ex); + logger.warn("Failed to cleanup volumes as a part of account id=" + accountId + " cleanup due to Exception: ", ex); accountCleanupNeeded = true; } } @@ -791,21 +789,21 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _remoteAccessVpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup remote access vpn resources as a part of account id=" + accountId + " cleanup due to Exception: ", ex); + logger.warn("Failed to cleanup remote access vpn resources as a part of account id=" + accountId + " cleanup due to Exception: ", ex); accountCleanupNeeded = true; } // Cleanup security groups int numRemoved = _securityGroupDao.removeByAccountId(accountId); - s_logger.info("deleteAccount: Deleted " + numRemoved + " network groups for account " + accountId); + logger.info("deleteAccount: Deleted " + numRemoved + " network groups for account " + accountId); // Cleanup affinity groups int numAGRemoved = _affinityGroupDao.removeByAccountId(accountId); - s_logger.info("deleteAccount: Deleted " + numAGRemoved + " affinity groups for account " + accountId); + logger.info("deleteAccount: Deleted " + numAGRemoved + " affinity groups for account " + accountId); // Delete all the networks 
boolean networksDeleted = true; - s_logger.debug("Deleting networks for account " + account.getId()); + logger.debug("Deleting networks for account " + account.getId()); List networks = _networkDao.listByOwner(accountId); if (networks != null) { for (NetworkVO network : networks) { @@ -813,27 +811,27 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M ReservationContext context = new ReservationContextImpl(null, null, getActiveUser(callerUserId), caller); if (!_networkMgr.destroyNetwork(network.getId(), context, false)) { - s_logger.warn("Unable to destroy network " + network + " as a part of account id=" + accountId + " cleanup."); + logger.warn("Unable to destroy network " + network + " as a part of account id=" + accountId + " cleanup."); accountCleanupNeeded = true; networksDeleted = false; } else { - s_logger.debug("Network " + network.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); + logger.debug("Network " + network.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); } } } // Delete all VPCs boolean vpcsDeleted = true; - s_logger.debug("Deleting vpcs for account " + account.getId()); + logger.debug("Deleting vpcs for account " + account.getId()); List vpcs = _vpcMgr.getVpcsForAccount(account.getId()); for (Vpc vpc : vpcs) { if (!_vpcMgr.destroyVpc(vpc, caller, callerUserId)) { - s_logger.warn("Unable to destroy VPC " + vpc + " as a part of account id=" + accountId + " cleanup."); + logger.warn("Unable to destroy VPC " + vpc + " as a part of account id=" + accountId + " cleanup."); accountCleanupNeeded = true; vpcsDeleted = false; } else { - s_logger.debug("VPC " + vpc.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); + logger.debug("VPC " + vpc.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); } } @@ -841,9 +839,9 @@ public class AccountManagerImpl extends ManagerBase implements 
AccountManager, M // release ip addresses belonging to the account List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { - s_logger.debug("Releasing ip " + ip + " as a part of account id=" + accountId + " cleanup"); + logger.debug("Releasing ip " + ip + " as a part of account id=" + accountId + " cleanup"); if (!_ipAddrMgr.disassociatePublicIpAddress(ip.getId(), callerUserId, caller)) { - s_logger.warn("Failed to release ip address " + ip + logger.warn("Failed to release ip address " + ip + " as a part of account id=" + accountId + " clenaup"); accountCleanupNeeded = true; @@ -852,16 +850,16 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } // Delete Site 2 Site VPN customer gateway - s_logger.debug("Deleting site-to-site VPN customer gateways for account " + accountId); + logger.debug("Deleting site-to-site VPN customer gateways for account " + accountId); if (!_vpnMgr.deleteCustomerGatewayByAccount(accountId)) { - s_logger.warn("Fail to delete site-to-site VPN customer gateways for account " + accountId); + logger.warn("Fail to delete site-to-site VPN customer gateways for account " + accountId); } // Delete autoscale resources if any try { _autoscaleMgr.cleanUpAutoScaleResources(accountId); } catch (CloudRuntimeException ex) { - s_logger.warn("Failed to cleanup AutoScale resources as a part of account id=" + accountId + " cleanup due to exception:", ex); + logger.warn("Failed to cleanup AutoScale resources as a part of account id=" + accountId + " cleanup due to exception:", ex); accountCleanupNeeded = true; } @@ -871,7 +869,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (!_configMgr.releaseAccountSpecificVirtualRanges(accountId)) { accountCleanupNeeded = true; } else { - s_logger.debug("Account specific Virtual IP ranges " + " are successfully released as a part of account id=" + accountId + " cleanup."); + logger.debug("Account specific 
Virtual IP ranges " + " are successfully released as a part of account id=" + accountId + " cleanup."); } } @@ -881,14 +879,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _dataCenterVnetDao.releaseDedicatedGuestVlans(map.getId()); } int vlansReleased = _accountGuestVlanMapDao.removeByAccountId(accountId); - s_logger.info("deleteAccount: Released " + vlansReleased + " dedicated guest vlan ranges from account " + accountId); + logger.info("deleteAccount: Released " + vlansReleased + " dedicated guest vlan ranges from account " + accountId); // release account specific acquired portable IP's. Since all the portable IP's must have been already // disassociated with VPC/guest network (due to deletion), so just mark portable IP as free. List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { if (ip.isPortable()) { - s_logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); + logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); _ipAddrMgr.releasePortableIpAddress(ip.getId()); } } @@ -896,10 +894,10 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // release dedication if any List dedicatedResources = _dedicatedDao.listByAccountId(accountId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - s_logger.debug("Releasing dedicated resources for account " + accountId); + logger.debug("Releasing dedicated resources for account " + accountId); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - s_logger.warn("Fail to release dedicated resources for account " + accountId); + logger.warn("Fail to release dedicated resources for account " + accountId); } } } @@ -917,11 +915,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _resourceLimitDao.removeEntriesByOwner(accountId, 
ResourceOwnerType.Account); return true; } catch (Exception ex) { - s_logger.warn("Failed to cleanup account " + account + " due to ", ex); + logger.warn("Failed to cleanup account " + account + " due to ", ex); accountCleanupNeeded = true; return true; } finally { - s_logger.info("Cleanup for account " + account.getId() + (accountCleanupNeeded ? " is needed." : " is not needed.")); + logger.info("Cleanup for account " + account.getId() + (accountCleanupNeeded ? " is needed." : " is not needed.")); if (accountCleanupNeeded) { _accountDao.markForCleanup(accountId); } else { @@ -935,8 +933,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M public boolean disableAccount(long accountId) throws ConcurrentOperationException, ResourceUnavailableException { boolean success = false; if (accountId <= 2) { - if (s_logger.isInfoEnabled()) { - s_logger.info("disableAccount -- invalid account id: " + accountId); + if (logger.isInfoEnabled()) { + logger.info("disableAccount -- invalid account id: " + accountId); } return false; } @@ -955,7 +953,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M disableAccountResult = doDisableAccount(accountId); } finally { if (!disableAccountResult) { - s_logger.warn("Failed to disable account " + account + " resources as a part of disableAccount call, marking the account for cleanup"); + logger.warn("Failed to disable account " + account + " resources as a part of disableAccount call, marking the account for cleanup"); _accountDao.markForCleanup(accountId); } else { acctForUpdate = _accountDao.createForUpdate(); @@ -976,13 +974,13 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { _itMgr.advanceStop(vm.getUuid(), false); } catch (OperationTimedoutException ote) { - s_logger.warn( + logger.warn( "Operation for stopping vm timed out, unable to stop vm " + vm.getHostName(), ote); success = false; } } catch (AgentUnavailableException aue) { 
- s_logger.warn("Agent running on host " + vm.getHostId() + " is unavailable, unable to stop vm " + vm.getHostName(), aue); + logger.warn("Agent running on host " + vm.getHostId() + " is unavailable, unable to stop vm " + vm.getHostName(), aue); success = false; } } @@ -1233,8 +1231,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M user.setSecretKey(secretKey); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("updating user with id: " + userId); + if (logger.isDebugEnabled()) { + logger.debug("updating user with id: " + userId); } try { // check if the apiKey and secretKey are globally unique @@ -1253,7 +1251,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _userDao.update(userId, user); } catch (Throwable th) { - s_logger.error("error updating user", th); + logger.error("error updating user", th); throw new CloudRuntimeException("Unable to update user " + userId); } @@ -1421,8 +1419,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M success = (success && lockAccount(user.getAccountId())); } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Attempting to lock a non-enabled user, current state is " + user.getState() + " (userId: " + user.getId() + "), locking failed."); + if (logger.isInfoEnabled()) { + logger.info("Attempting to lock a non-enabled user, current state is " + user.getState() + " (userId: " + user.getId() + "), locking failed."); } success = false; } @@ -1451,7 +1449,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (account == null || account.getRemoved() != null) { if (account != null) { - s_logger.info("The account:" + account.getAccountName() + " is already removed"); + logger.info("The account:" + account.getAccountName() + " is already removed"); } return true; } @@ -1601,7 +1599,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // Check if account 
exists if (account == null || account.getType() == Account.ACCOUNT_TYPE_PROJECT) { - s_logger.error("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + logger.error("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); } @@ -1700,39 +1698,39 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { GlobalLock lock = GlobalLock.getInternLock("AccountCleanup"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } try { // Cleanup removed accounts List removedAccounts = _accountDao.findCleanupsForRemovedAccounts(null); - s_logger.info("Found " + removedAccounts.size() + " removed accounts to cleanup"); + logger.info("Found " + removedAccounts.size() + " removed accounts to cleanup"); for (AccountVO account : removedAccounts) { - s_logger.debug("Cleaning up " + account.getId()); + logger.debug("Cleaning up " + account.getId()); cleanupAccount(account, getSystemUser().getId(), getSystemAccount()); } // cleanup disabled accounts List disabledAccounts = _accountDao.findCleanupsForDisabledAccounts(); - s_logger.info("Found " + disabledAccounts.size() + " disabled accounts to cleanup"); + logger.info("Found " + disabledAccounts.size() + " disabled accounts to cleanup"); for (AccountVO account : disabledAccounts) { - s_logger.debug("Disabling account " + account.getId()); + logger.debug("Disabling account " + account.getId()); try { disableAccount(account.getId()); } catch (Exception e) { - s_logger.error("Skipping due to error on account " + account.getId(), e); + logger.error("Skipping 
due to error on account " + account.getId(), e); } } // cleanup inactive domains List inactiveDomains = _domainMgr.findInactiveDomains(); - s_logger.info("Found " + inactiveDomains.size() + " inactive domains to cleanup"); + logger.info("Found " + inactiveDomains.size() + " inactive domains to cleanup"); for (Domain inactiveDomain : inactiveDomains) { long domainId = inactiveDomain.getId(); try { @@ -1741,47 +1739,47 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // release dedication if any, before deleting the domain List dedicatedResources = _dedicatedDao.listByDomainId(domainId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - s_logger.debug("Releasing dedicated resources for domain" + domainId); + logger.debug("Releasing dedicated resources for domain" + domainId); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - s_logger.warn("Fail to release dedicated resources for domain " + domainId); + logger.warn("Fail to release dedicated resources for domain " + domainId); } } } - s_logger.debug("Removing inactive domain id=" + domainId); + logger.debug("Removing inactive domain id=" + domainId); _domainMgr.removeDomain(domainId); } else { - s_logger.debug("Can't remove inactive domain id=" + domainId + " as it has accounts that need cleanup"); + logger.debug("Can't remove inactive domain id=" + domainId + " as it has accounts that need cleanup"); } } catch (Exception e) { - s_logger.error("Skipping due to error on domain " + domainId, e); + logger.error("Skipping due to error on domain " + domainId, e); } } // cleanup inactive projects List inactiveProjects = _projectDao.listByState(Project.State.Disabled); - s_logger.info("Found " + inactiveProjects.size() + " disabled projects to cleanup"); + logger.info("Found " + inactiveProjects.size() + " disabled projects to cleanup"); for (ProjectVO project : inactiveProjects) { try { Account projectAccount = 
getAccount(project.getProjectAccountId()); if (projectAccount == null) { - s_logger.debug("Removing inactive project id=" + project.getId()); + logger.debug("Removing inactive project id=" + project.getId()); _projectMgr.deleteProject(CallContext.current().getCallingAccount(), CallContext.current().getCallingUserId(), project); } else { - s_logger.debug("Can't remove disabled project " + project + " as it has non removed account id=" + project.getId()); + logger.debug("Can't remove disabled project " + project + " as it has non removed account id=" + project.getId()); } } catch (Exception e) { - s_logger.error("Skipping due to error on project " + project, e); + logger.error("Skipping due to error on project " + project, e); } } } catch (Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } finally { lock.unlock(); } } catch (Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } } } @@ -1982,8 +1980,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M protected UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone, String userUUID, User.Source source) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating user: " + userName + ", accountId: " + accountId + " timezone:" + timezone); + if (logger.isDebugEnabled()) { + logger.debug("Creating user: " + userName + ", accountId: " + accountId + " timezone:" + timezone); } String encodedPassword = null; @@ -2065,14 +2063,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M timestamp = Long.parseLong(timestampStr); long currentTime = System.currentTimeMillis(); if (Math.abs(currentTime - timestamp) > tolerance) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expired timestamp passed in to login, current time = " + currentTime + ", timestamp = " + timestamp); + if (logger.isDebugEnabled()) { + 
logger.debug("Expired timestamp passed in to login, current time = " + currentTime + ", timestamp = " + timestamp); } return null; } } catch (NumberFormatException nfe) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Invalid timestamp passed in to login: " + timestampStr); + if (logger.isDebugEnabled()) { + logger.debug("Invalid timestamp passed in to login: " + timestampStr); } return null; } @@ -2086,8 +2084,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } if ((signature == null) || (timestamp == 0L)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Missing parameters in login request, signature = " + signature + ", timestamp = " + timestamp); + if (logger.isDebugEnabled()) { + logger.debug("Missing parameters in login request, signature = " + signature + ", timestamp = " + timestamp); } return null; } @@ -2102,12 +2100,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M String computedSignature = new String(Base64.encodeBase64(encryptedBytes)); boolean equalSig = ConstantTimeComparator.compareStrings(signature, computedSignature); if (!equalSig) { - s_logger.info("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); + logger.info("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); } else { user = _userAccountDao.getUserAccount(username, domainId); } } catch (Exception ex) { - s_logger.error("Exception authenticating user", ex); + logger.error("Exception authenticating user", ex); return null; } } @@ -2115,17 +2113,17 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (user != null) { // don't allow to authenticate system user if (user.getId() == User.UID_SYSTEM) { - s_logger.error("Failed to authenticate user: " + username + " in domain " + domainId); + logger.error("Failed to authenticate user: " + username + " in domain " + domainId); return null; } // 
don't allow baremetal system user if (BaremetalUtils.BAREMETAL_SYSTEM_ACCOUNT_NAME.equals(user.getUsername())) { - s_logger.error("Won't authenticate user: " + username + " in domain " + domainId); + logger.error("Won't authenticate user: " + username + " in domain " + domainId); return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("User: " + username + " in domain " + domainId + " has successfully logged in"); + if (logger.isDebugEnabled()) { + logger.debug("User: " + username + " in domain " + domainId + " has successfully logged in"); } ActionEventUtils.onActionEvent(user.getId(), user.getAccountId(), user.getDomainId(), EventTypes.EVENT_USER_LOGIN, "user has logged in from IP Address " + @@ -2133,20 +2131,20 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return user; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("User: " + username + " in domain " + domainId + " has failed to log in"); + if (logger.isDebugEnabled()) { + logger.debug("User: " + username + " in domain " + domainId + " has failed to log in"); } return null; } } private UserAccount getUserAccount(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Attempting to log in user: " + username + " in domain " + domainId); + if (logger.isDebugEnabled()) { + logger.debug("Attempting to log in user: " + username + " in domain " + domainId); } UserAccount userAccount = _userAccountDao.getUserAccount(username, domainId); if (userAccount == null) { - s_logger.warn("Unable to find an user with username " + username + " in domain " + domainId); + logger.warn("Unable to find an user with username " + username + " in domain " + domainId); return null; } @@ -2180,8 +2178,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (!userAccount.getState().equalsIgnoreCase(Account.State.enabled.toString()) || 
!userAccount.getAccountState().equalsIgnoreCase(Account.State.enabled.toString())) { - if (s_logger.isInfoEnabled()) { - s_logger.info("User " + username + " in domain " + domainName + " is disabled/locked (or account is disabled/locked)"); + if (logger.isInfoEnabled()) { + logger.info("User " + username + " in domain " + domainName + " is disabled/locked (or account is disabled/locked)"); } throw new CloudAuthenticationException("User " + username + " in domain " + domainName + " is disabled/locked (or account is disabled/locked)"); // return null; @@ -2192,8 +2190,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return userAccount; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to authenticate user with username " + username + " in domain " + domainId); + if (logger.isDebugEnabled()) { + logger.debug("Unable to authenticate user with username " + username + " in domain " + domainId); } if (userAccount.getState().equalsIgnoreCase(Account.State.enabled.toString())) { @@ -2203,15 +2201,15 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (updateIncorrectLoginCount) { if (attemptsMade < _allowedLoginAttempts) { updateLoginAttempts(userAccount.getId(), attemptsMade, false); - s_logger.warn("Login attempt failed. You have " + (_allowedLoginAttempts - attemptsMade) + " attempt(s) remaining"); + logger.warn("Login attempt failed. You have " + (_allowedLoginAttempts - attemptsMade) + " attempt(s) remaining"); } else { updateLoginAttempts(userAccount.getId(), _allowedLoginAttempts, true); - s_logger.warn("User " + userAccount.getUsername() + " has been disabled due to multiple failed login attempts." + " Please contact admin."); + logger.warn("User " + userAccount.getUsername() + " has been disabled due to multiple failed login attempts." 
+ " Please contact admin."); } } } } else { - s_logger.info("User " + userAccount.getUsername() + " is disabled/locked"); + logger.info("User " + userAccount.getUsername() + " is disabled/locked"); } return null; } @@ -2297,7 +2295,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _userDao.update(userId, updatedUser); return encodedKey; } catch (NoSuchAlgorithmException ex) { - s_logger.error("error generating secret key for user id=" + userId, ex); + logger.error("error generating secret key for user id=" + userId, ex); } return null; } @@ -2324,7 +2322,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _userDao.update(userId, updatedUser); return encodedKey; } catch (NoSuchAlgorithmException ex) { - s_logger.error("error generating secret key for user id=" + userId, ex); + logger.error("error generating secret key for user id=" + userId, ex); } return null; } @@ -2692,8 +2690,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(account, so)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + account + " to " + so + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + account + " to " + so + " by " + checker.getName()); } return; } @@ -2708,8 +2706,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(account, dof)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + account + " to " + dof + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + account + " to " + dof + " by " + checker.getName()); } return; } diff --git a/server/src/com/cloud/user/DomainManagerImpl.java 
b/server/src/com/cloud/user/DomainManagerImpl.java index fbbe0c28704..91271a39cc4 100644 --- a/server/src/com/cloud/user/DomainManagerImpl.java +++ b/server/src/com/cloud/user/DomainManagerImpl.java @@ -24,7 +24,6 @@ import java.util.UUID; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.domain.ListDomainChildrenCmd; @@ -78,7 +77,6 @@ import com.cloud.vm.ReservationContextImpl; @Component @Local(value = {DomainManager.class, DomainService.class}) public class DomainManagerImpl extends ManagerBase implements DomainManager, DomainService { - public static final Logger s_logger = Logger.getLogger(DomainManagerImpl.class); @Inject private DomainDao _domainDao; @@ -256,7 +254,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom @Override public boolean deleteDomain(DomainVO domain, Boolean cleanup) { // mark domain as inactive - s_logger.debug("Marking domain id=" + domain.getId() + " as " + Domain.State.Inactive + " before actually deleting it"); + logger.debug("Marking domain id=" + domain.getId() + " as " + Domain.State.Inactive + " before actually deleting it"); domain.setState(Domain.State.Inactive); _domainDao.update(domain.getId(), domain); boolean rollBackState = false; @@ -279,7 +277,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom List accountsForCleanup = _accountDao.findCleanupsForRemovedAccounts(domain.getId()); List dedicatedResources = _dedicatedDao.listByDomainId(domain.getId()); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - s_logger.error("There are dedicated resources for the domain " + domain.getId()); + logger.error("There are dedicated resources for the domain " + domain.getId()); hasDedicatedResources = true; } if (accountsForCleanup.isEmpty() && networkIds.isEmpty() && !hasDedicatedResources) { @@ -312,7 +310,7 @@ public class 
DomainManagerImpl extends ManagerBase implements DomainManager, Dom CallContext.current().putContextParameter(Domain.class, domain.getUuid()); return true; } catch (Exception ex) { - s_logger.error("Exception deleting domain with id " + domain.getId(), ex); + logger.error("Exception deleting domain with id " + domain.getId(), ex); if (ex instanceof CloudRuntimeException) throw (CloudRuntimeException)ex; else @@ -320,7 +318,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom } finally { //when success is false if (rollBackState) { - s_logger.debug("Changing domain id=" + domain.getId() + " state back to " + Domain.State.Active + + logger.debug("Changing domain id=" + domain.getId() + " state back to " + Domain.State.Active + " because it can't be removed due to resources referencing to it"); domain.setState(Domain.State.Active); _domainDao.update(domain.getId(), domain); @@ -342,7 +340,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom } private boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Cleaning up domain id=" + domainId); + logger.debug("Cleaning up domain id=" + domainId); boolean success = true; { DomainVO domainHandle = _domainDao.findById(domainId); @@ -367,7 +365,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom for (DomainVO domain : domains) { success = (success && cleanupDomain(domain.getId(), domain.getAccountId())); if (!success) { - s_logger.warn("Failed to cleanup domain id=" + domain.getId()); + logger.warn("Failed to cleanup domain id=" + domain.getId()); } } } @@ -378,18 +376,18 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom List accounts = _accountDao.search(sc, null); for (AccountVO account : accounts) { if (account.getType() != Account.ACCOUNT_TYPE_PROJECT) { - s_logger.debug("Deleting account " + account + " as a 
part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting account " + account + " as a part of domain id=" + domainId + " cleanup"); boolean deleteAccount = _accountMgr.deleteAccount(account, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount()); if (!deleteAccount) { - s_logger.warn("Failed to cleanup account id=" + account.getId() + " as a part of domain cleanup"); + logger.warn("Failed to cleanup account id=" + account.getId() + " as a part of domain cleanup"); } success = (success && deleteAccount); } else { ProjectVO project = _projectDao.findByProjectAccountId(account.getId()); - s_logger.debug("Deleting project " + project + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting project " + project + " as a part of domain id=" + domainId + " cleanup"); boolean deleteProject = _projectMgr.deleteProject(CallContext.current().getCallingAccount(), CallContext.current().getCallingUserId(), project); if (!deleteProject) { - s_logger.warn("Failed to cleanup project " + project + " as a part of domain cleanup"); + logger.warn("Failed to cleanup project " + project + " as a part of domain cleanup"); } success = (success && deleteProject); } @@ -397,23 +395,23 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom //delete the domain shared networks boolean networksDeleted = true; - s_logger.debug("Deleting networks for domain id=" + domainId); + logger.debug("Deleting networks for domain id=" + domainId); List networkIds = _networkDomainDao.listNetworkIdsByDomain(domainId); CallContext ctx = CallContext.current(); ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(ctx.getCallingUserId()), ctx.getCallingAccount()); for (Long networkId : networkIds) { - s_logger.debug("Deleting network id=" + networkId + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting network id=" + networkId + " as a part of domain 
id=" + domainId + " cleanup"); if (!_networkMgr.destroyNetwork(networkId, context, false)) { - s_logger.warn("Unable to destroy network id=" + networkId + " as a part of domain id=" + domainId + " cleanup."); + logger.warn("Unable to destroy network id=" + networkId + " as a part of domain id=" + domainId + " cleanup."); networksDeleted = false; } else { - s_logger.debug("Network " + networkId + " successfully deleted as a part of domain id=" + domainId + " cleanup."); + logger.debug("Network " + networkId + " successfully deleted as a part of domain id=" + domainId + " cleanup."); } } //don't proceed if networks failed to cleanup. The cleanup will be performed for inactive domain once again if (!networksDeleted) { - s_logger.debug("Failed to delete the shared networks as a part of domain id=" + domainId + " clenaup"); + logger.debug("Failed to delete the shared networks as a part of domain id=" + domainId + " clenaup"); return false; } @@ -424,10 +422,10 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom //release dedication if any, before deleting the domain List dedicatedResources = _dedicatedDao.listByDomainId(domainId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - s_logger.debug("Releasing dedicated resources for domain" + domainId); + logger.debug("Releasing dedicated resources for domain" + domainId); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - s_logger.warn("Fail to release dedicated resources for domain " + domainId); + logger.warn("Fail to release dedicated resources for domain " + domainId); return false; } } @@ -439,7 +437,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom _resourceCountDao.removeEntriesByOwner(domainId, ResourceOwnerType.Domain); _resourceLimitDao.removeEntriesByOwner(domainId, ResourceOwnerType.Domain); } else { - s_logger.debug("Can't delete the domain yet because it has " + 
accountsForCleanup.size() + "accounts that need a cleanup"); + logger.debug("Can't delete the domain yet because it has " + accountsForCleanup.size() + "accounts that need a cleanup"); return false; } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index b23f984b242..1433050395c 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -89,7 +89,6 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -286,7 +285,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Local(value = {UserVmManager.class, UserVmService.class}) public class UserVmManagerImpl extends ManagerBase implements UserVmManager, VirtualMachineGuru, UserVmService, Configurable { - private static final Logger s_logger = Logger.getLogger(UserVmManagerImpl.class); private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 @@ -609,7 +607,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir boolean decrementCount = true; try { - s_logger.debug("Trying for vm "+ vmId +" nic Id "+nicId +" ip retrieval ..."); + logger.debug("Trying for vm "+ vmId +" nic Id "+nicId +" ip retrieval ..."); Answer answer = _agentMgr.send(hostId, cmd); NicVO nic = _nicDao.findById(nicId); if (answer.getResult()) { @@ -620,7 +618,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (nic != null) { nic.setIPv4Address(vmIp); _nicDao.update(nicId, nic); - s_logger.debug("Vm "+ vmId +" IP "+vmIp +" got retrieved successfully"); + logger.debug("Vm "+ vmId +" IP "+vmIp +" got retrieved successfully"); 
vmIdCountMap.remove(nicId); decrementCount = false; ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, @@ -636,18 +634,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _nicDao.update(nicId, nic); } if (answer.getDetails() != null) { - s_logger.debug("Failed to get vm ip for Vm "+ vmId + answer.getDetails()); + logger.debug("Failed to get vm ip for Vm "+ vmId + answer.getDetails()); } } } catch (OperationTimedoutException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } catch (AgentUnavailableException e) { - s_logger.warn("Agent Unavailable ", e); + logger.warn("Agent Unavailable ", e); } finally { if (decrementCount) { VmAndCountDetails vmAndCount = vmIdCountMap.get(nicId); vmAndCount.decrementCount(); - s_logger.debug("Ip is not retrieved for VM " + vmId +" nic "+nicId + " ... decremented count to "+vmAndCount.getRetrievalCount()); + logger.debug("Ip is not retrieved for VM " + vmId +" nic "+nicId + " ... 
decremented count to "+vmAndCount.getRetrievalCount()); vmIdCountMap.put(nicId, vmAndCount); } } @@ -678,7 +676,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (userVm.getState() == State.Error || userVm.getState() == State.Expunging) { - s_logger.error("vm is not in the right state: " + vmId); + logger.error("vm is not in the right state: " + vmId); throw new InvalidParameterValueException("Vm with id " + vmId + " is not in the right state"); } @@ -711,7 +709,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (template.getEnablePassword()) { Nic defaultNic = _networkModel.getDefaultNic(vmId); if (defaultNic == null) { - s_logger.error("Unable to reset password for vm " + vmInstance + " as the instance doesn't have default nic"); + logger.error("Unable to reset password for vm " + vmInstance + " as the instance doesn't have default nic"); return false; } @@ -731,25 +729,25 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Need to reboot the virtual machine so that the password gets // redownloaded from the DomR, and reset on the VM if (!result) { - s_logger.debug("Failed to reset password for the virutal machine; no need to reboot the vm"); + logger.debug("Failed to reset password for the virutal machine; no need to reboot the vm"); return false; } else { if (vmInstance.getState() == State.Stopped) { - s_logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part of password reset"); + logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part of password reset"); return true; } if (rebootVirtualMachine(userId, vmId) == null) { - s_logger.warn("Failed to reboot the vm " + vmInstance); + logger.warn("Failed to reboot the vm " + vmInstance); return false; } else { - s_logger.debug("Vm " + vmInstance + " is rebooted successfully as a part of password reset"); + logger.debug("Vm " + vmInstance + " is rebooted successfully 
as a part of password reset"); return true; } } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Reset password called for a vm that is not using a password enabled template"); + if (logger.isDebugEnabled()) { + logger.debug("Reset password called for a vm that is not using a password enabled template"); } return false; } @@ -774,11 +772,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Do parameters input validation if (userVm.getState() == State.Error || userVm.getState() == State.Expunging) { - s_logger.error("vm is not in the right state: " + vmId); + logger.error("vm is not in the right state: " + vmId); throw new InvalidParameterValueException("Vm with specified id is not in the right state"); } if (userVm.getState() != State.Stopped) { - s_logger.error("vm is not in the right state: " + vmId); + logger.error("vm is not in the right state: " + vmId); throw new InvalidParameterValueException("Vm " + userVm + " should be stopped to do SSH Key reset"); } @@ -818,7 +816,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir VMTemplateVO template = _templateDao.findByIdIncludingRemoved(vmInstance.getTemplateId()); Nic defaultNic = _networkModel.getDefaultNic(vmId); if (defaultNic == null) { - s_logger.error("Unable to reset SSH Key for vm " + vmInstance + " as the instance doesn't have default nic"); + logger.error("Unable to reset SSH Key for vm " + vmInstance + " as the instance doesn't have default nic"); return false; } @@ -840,18 +838,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Need to reboot the virtual machine so that the password gets redownloaded from the DomR, and reset on the VM if (!result) { - s_logger.debug("Failed to reset SSH Key for the virutal machine; no need to reboot the vm"); + logger.debug("Failed to reset SSH Key for the virutal machine; no need to reboot the vm"); return false; } else { if (vmInstance.getState() 
== State.Stopped) { - s_logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part of SSH Key reset"); + logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part of SSH Key reset"); return true; } if (rebootVirtualMachine(userId, vmId) == null) { - s_logger.warn("Failed to reboot the vm " + vmInstance); + logger.warn("Failed to reboot the vm " + vmInstance); return false; } else { - s_logger.debug("Vm " + vmInstance + " is rebooted successfully as a part of SSH Key reset"); + logger.debug("Vm " + vmInstance + " is rebooted successfully as a part of SSH Key reset"); return true; } } @@ -860,13 +858,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override public boolean stopVirtualMachine(long userId, long vmId) { boolean status = false; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Stopping vm=" + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Stopping vm=" + vmId); } UserVmVO vm = _vmDao.findById(vmId); if (vm == null || vm.getRemoved() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is either removed or deleted."); + if (logger.isDebugEnabled()) { + logger.debug("VM is either removed or deleted."); } return true; } @@ -876,7 +874,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid()); status = vmEntity.stop(Long.toString(userId)); } catch (ResourceUnavailableException e) { - s_logger.debug("Unable to stop due to ", e); + logger.debug("Unable to stop due to ", e); status = false; } catch (CloudException e) { throw new CloudRuntimeException("Unable to contact the agent to stop the virtual machine " + vm, e); @@ -888,7 +886,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir UserVmVO vm = _vmDao.findById(vmId); if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) 
{ - s_logger.warn("Vm id=" + vmId + " doesn't exist"); + logger.warn("Vm id=" + vmId + " doesn't exist"); return null; } @@ -909,7 +907,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir //Safe to start the stopped router serially, this is consistent with the way how multiple networks are added to vm during deploy //and routers are started serially ,may revisit to make this process parallel for(DomainRouterVO routerToStart : routers) { - s_logger.warn("Trying to start router " + routerToStart.getInstanceName() + " as part of vm: " + vm.getInstanceName() + " reboot"); + logger.warn("Trying to start router " + routerToStart.getInstanceName() + " as part of vm: " + vm.getInstanceName() + " reboot"); _virtualNetAppliance.startRouter(routerToStart.getId(),true); } } @@ -918,12 +916,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } catch (Exception ex){ throw new CloudRuntimeException("Router start failed due to" + ex); }finally { - s_logger.info("Rebooting vm " + vm.getInstanceName()); + logger.info("Rebooting vm " + vm.getInstanceName()); _itMgr.reboot(vm.getUuid(), null); } return _vmDao.findById(vmId); } else { - s_logger.error("Vm id=" + vmId + " is not in Running state, failed to reboot"); + logger.error("Vm id=" + vmId + " is not in Running state, failed to reboot"); return null; } } @@ -1213,7 +1211,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new CloudRuntimeException("Unable to add NIC to " + vmInstance); } CallContext.current().putContextParameter(Nic.class.getName(), guestNic.getUuid()); - s_logger.debug("Successful addition of " + network + " from " + vmInstance); + logger.debug("Successful addition of " + network + " from " + vmInstance); return _vmDao.findById(vmInstance.getId()); } @@ -1285,7 +1283,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new CloudRuntimeException("Unable to remove " + 
network + " from " + vmInstance); } - s_logger.debug("Successful removal of " + network + " from " + vmInstance); + logger.debug("Successful removal of " + network + " from " + vmInstance); return _vmDao.findById(vmInstance.getId()); } @@ -1350,7 +1348,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (existing == null) { - s_logger.warn("Failed to update default nic, no nic profile found for existing default network"); + logger.warn("Failed to update default nic, no nic profile found for existing default network"); throw new CloudRuntimeException("Failed to find a nic profile for the existing default network. This is bad and probably means some sort of configuration corruption"); } @@ -1392,7 +1390,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } throw new CloudRuntimeException("Failed to change default nic to " + nic + " and now we have no default"); } else if (newdefault.getId() == nic.getNetworkId()) { - s_logger.debug("successfully set default network to " + network + " for " + vmInstance); + logger.debug("successfully set default network to " + network + " for " + vmInstance); String nicIdString = Long.toString(nic.getId()); long newNetworkOfferingId = network.getNetworkOfferingId(); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vmInstance.getAccountId(), vmInstance.getDataCenterId(), vmInstance.getId(), @@ -1453,13 +1451,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Answer answer = _agentMgr.easySend(hostId, new GetVmDiskStatsCommand(vmNames, _hostDao.findById(hostId).getGuid(), hostName)); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to obtain VM disk statistics."); + logger.warn("Unable to obtain VM disk statistics."); return null; } else { HashMap> vmDiskStatsByName = ((GetVmDiskStatsAnswer)answer).getVmDiskStatsMap(); if (vmDiskStatsByName == null) { - s_logger.warn("Unable to 
obtain VM disk statistics."); + logger.warn("Unable to obtain VM disk statistics."); return null; } @@ -1501,7 +1499,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Account caller = CallContext.current().getCallingAccount(); VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId); if (vmInstance.getHypervisorType() != HypervisorType.XenServer && vmInstance.getHypervisorType() != HypervisorType.VMware) { - s_logger.info("Scaling the VM dynamically is not supported for VMs running on Hypervisor "+vmInstance.getHypervisorType()); + logger.info("Scaling the VM dynamically is not supported for VMs running on Hypervisor "+vmInstance.getHypervisorType()); throw new InvalidParameterValueException("Scaling the VM dynamically is not supported for VMs running on Hypervisor "+vmInstance.getHypervisorType()); } @@ -1607,13 +1605,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } return success; } catch (InsufficientCapacityException e) { - s_logger.warn("Received exception while scaling ", e); + logger.warn("Received exception while scaling ", e); } catch (ResourceUnavailableException e) { - s_logger.warn("Received exception while scaling ", e); + logger.warn("Received exception while scaling ", e); } catch (ConcurrentOperationException e) { - s_logger.warn("Received exception while scaling ", e); + logger.warn("Received exception while scaling ", e); } catch (Exception e) { - s_logger.warn("Received exception while scaling ", e); + logger.warn("Received exception while scaling ", e); } finally { if (!success) { _itMgr.upgradeVmDb(vmId, currentServiceOffering.getId()); // rollback @@ -1681,13 +1679,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Answer answer = _agentMgr.easySend(hostId, new GetVmStatsCommand(vmNames, _hostDao.findById(hostId).getGuid(), hostName)); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to obtain VM statistics."); + 
logger.warn("Unable to obtain VM statistics."); return null; } else { HashMap vmStatsByName = ((GetVmStatsAnswer)answer).getVmStatsMap(); if (vmStatsByName == null) { - s_logger.warn("Unable to obtain VM statistics."); + logger.warn("Unable to obtain VM statistics."); return null; } @@ -1720,21 +1718,21 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getRemoved() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find vm or vm is removed: " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find vm or vm is removed: " + vmId); } throw new InvalidParameterValueException("Unable to find vm by id " + vmId); } if (vm.getState() != State.Destroyed) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("vm is not in the right state: " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("vm is not in the right state: " + vmId); } throw new InvalidParameterValueException("Vm with id " + vmId + " is not in the right state"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Recovering vm " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Recovering vm " + vmId); } Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { @@ -1759,7 +1757,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { if (!_itMgr.stateTransitTo(vm, VirtualMachine.Event.RecoveryRequested, null)) { - s_logger.debug("Unable to recover the vm because it is not in the correct state: " + vmId); + logger.debug("Unable to recover the vm because it is not in the correct state: " + vmId); throw new InvalidParameterValueException("Unable to recover the vm because it is not in the correct state: " + vmId); } } catch (NoTransitionException e) { @@ -1840,7 +1838,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _scaleRetry = NumbersUtil.parseInt(configs.get(Config.ScaleRetry.key()), 2); - s_logger.info("User VM Manager is 
configured."); + logger.info("User VM Manager is configured."); return true; } @@ -1917,11 +1915,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vm.getRemoved() == null) { // Cleanup vm resources - all the PF/LB/StaticNat rules // associated with vm - s_logger.debug("Starting cleaning up vm " + vm + " resources..."); + logger.debug("Starting cleaning up vm " + vm + " resources..."); if (cleanupVmResources(vm.getId())) { - s_logger.debug("Successfully cleaned up vm " + vm + " resources as a part of expunge process"); + logger.debug("Successfully cleaned up vm " + vm + " resources as a part of expunge process"); } else { - s_logger.warn("Failed to cleanup resources as a part of vm " + vm + " expunge"); + logger.warn("Failed to cleanup resources as a part of vm " + vm + " expunge"); return false; } @@ -1931,13 +1929,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return true; } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to expunge " + vm, e); + logger.warn("Unable to expunge " + vm, e); return false; } catch (OperationTimedoutException e) { - s_logger.warn("Operation time out on expunging " + vm, e); + logger.warn("Operation time out on expunging " + vm, e); return false; } catch (ConcurrentOperationException e) { - s_logger.warn("Concurrent operations on expunging " + vm, e); + logger.warn("Concurrent operations on expunging " + vm, e); return false; } finally { _vmDao.releaseFromLockTable(vm.getId()); @@ -1954,26 +1952,26 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // cleanup firewall rules if (_firewallMgr.revokeFirewallRulesForVm(vmId)) { - s_logger.debug("Firewall rules are removed successfully as a part of vm id=" + vmId + " expunge"); + logger.debug("Firewall rules are removed successfully as a part of vm id=" + vmId + " expunge"); } else { success = false; - s_logger.warn("Fail to remove firewall rules as a part of vm 
id=" + vmId + " expunge"); + logger.warn("Fail to remove firewall rules as a part of vm id=" + vmId + " expunge"); } // cleanup port forwarding rules if (_rulesMgr.revokePortForwardingRulesForVm(vmId)) { - s_logger.debug("Port forwarding rules are removed successfully as a part of vm id=" + vmId + " expunge"); + logger.debug("Port forwarding rules are removed successfully as a part of vm id=" + vmId + " expunge"); } else { success = false; - s_logger.warn("Fail to remove port forwarding rules as a part of vm id=" + vmId + " expunge"); + logger.warn("Fail to remove port forwarding rules as a part of vm id=" + vmId + " expunge"); } // cleanup load balancer rules if (_lbMgr.removeVmFromLoadBalancers(vmId)) { - s_logger.debug("Removed vm id=" + vmId + " from all load balancers as a part of expunge process"); + logger.debug("Removed vm id=" + vmId + " from all load balancers as a part of expunge process"); } else { success = false; - s_logger.warn("Fail to remove vm id=" + vmId + " from load balancers as a part of expunge process"); + logger.warn("Fail to remove vm id=" + vmId + " from load balancers as a part of expunge process"); } // If vm is assigned to static nat, disable static nat for the ip @@ -1983,14 +1981,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir for (IPAddressVO ip : ips) { try { if (_rulesMgr.disableStaticNat(ip.getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM, true)) { - s_logger.debug("Disabled 1-1 nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); + logger.debug("Disabled 1-1 nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); } else { - s_logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); + logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); success = false; } } catch (ResourceUnavailableException e) { success = false; - 
s_logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge because resource is unavailable", e); + logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge because resource is unavailable", e); } } @@ -2011,11 +2009,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vm != null) { if (vm.getState().equals(State.Stopped)) { - s_logger.debug("Destroying vm " + vm + " as it failed to create on Host with Id:" + hostId); + logger.debug("Destroying vm " + vm + " as it failed to create on Host with Id:" + hostId); try { _itMgr.stateTransitTo(vm, VirtualMachine.Event.OperationFailedToError, null); } catch (NoTransitionException e1) { - s_logger.warn(e1.getMessage()); + logger.warn(e1.getMessage()); } // destroy associated volumes for vm in error state // get all volumes in non destroyed state @@ -2060,7 +2058,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vmIdAndCount.getRetrievalCount() <= 0) { vmIdCountMap.remove(nicId); - s_logger.debug("Vm " + vmId +" nic "+nicId + " count is zero .. removing vm nic from map "); + logger.debug("Vm " + vmId +" nic "+nicId + " count is zero .. 
removing vm nic from map "); ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_NETWORK_EXTERNAL_DHCP_VM_IPFETCH, @@ -2084,7 +2082,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } catch (Exception e) { - s_logger.error("Caught the Exception in VmIpFetchTask", e); + logger.error("Caught the Exception in VmIpFetchTask", e); } finally { scanLock.unlock(); } @@ -2108,22 +2106,22 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { try { List vms = _vmDao.findDestroyedVms(new Date(System.currentTimeMillis() - ((long)_expungeDelay << 10))); - if (s_logger.isInfoEnabled()) { + if (logger.isInfoEnabled()) { if (vms.size() == 0) { - s_logger.trace("Found " + vms.size() + " vms to expunge."); + logger.trace("Found " + vms.size() + " vms to expunge."); } else { - s_logger.info("Found " + vms.size() + " vms to expunge."); + logger.info("Found " + vms.size() + " vms to expunge."); } } for (UserVmVO vm : vms) { try { expungeVm(vm.getId()); } catch (Exception e) { - s_logger.warn("Unable to expunge " + vm, e); + logger.warn("Unable to expunge " + vm, e); } } } catch (Exception e) { - s_logger.error("Caught the following Exception", e); + logger.error("Caught the following Exception", e); } finally { scanLock.unlock(); } @@ -2263,7 +2261,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getState() == State.Error || vm.getState() == State.Expunging) { - s_logger.error("vm is not in the right state: " + id); + logger.error("vm is not in the right state: " + id); throw new InvalidParameterValueException("Vm with id " + id + " is not in the right state"); } @@ -2316,7 +2314,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir checkNameForRFCCompliance(hostName); if (vm.getHostName().equalsIgnoreCase(hostName)) { 
- s_logger.debug("Vm " + vm + " is already set with the hostName specified: " + hostName); + logger.debug("Vm " + vm + " is already set with the hostName specified: " + hostName); hostName = null; } @@ -2334,7 +2332,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (updateUserdata) { boolean result = updateUserDataInternal(_vmDao.findById(id)); if (result) { - s_logger.debug("User data successfully updated for vm id=" + id); + logger.debug("User data successfully updated for vm id=" + id); } else { throw new CloudRuntimeException("Failed to reset userdata for the virtual machine "); } @@ -2348,7 +2346,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir List nics = _nicDao.listByVmId(vm.getId()); if (nics == null || nics.isEmpty()) { - s_logger.error("unable to find any nics for vm " + vm.getUuid()); + logger.error("unable to find any nics for vm " + vm.getUuid()); return false; } @@ -2365,7 +2363,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } boolean result = element.saveUserData(network, nicProfile, vmProfile); if (!result) { - s_logger.error("Failed to update userdata for vm " + vm + " and nic " + nic); + logger.error("Failed to update userdata for vm " + vm + " and nic " + nic); } } @@ -2462,7 +2460,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // not // created. 
if (account == null) { - s_logger.warn("Failed to acquire lock on account"); + logger.warn("Failed to acquire lock on account"); return null; } InstanceGroupVO group = _vmGroupDao.findByAccountAndName(accountId, groupName); @@ -2525,7 +2523,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (group != null) { UserVm userVm = _vmDao.acquireInLockTable(userVmId); if (userVm == null) { - s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm id=" + userVmId); } try { final InstanceGroupVO groupFinal = group; @@ -2536,7 +2534,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // it. InstanceGroupVO ngrpLock = _vmGroupDao.lockRow(groupFinal.getId(), false); if (ngrpLock == null) { - s_logger.warn("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); + logger.warn("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); throw new CloudRuntimeException("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); } @@ -2580,7 +2578,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return null; } } catch (Exception e) { - s_logger.warn("Error trying to get group for a vm: ", e); + logger.warn("Error trying to get group for a vm: ", e); return null; } } @@ -2595,7 +2593,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _groupVMMapDao.expunge(sc); } } catch (Exception e) { - s_logger.warn("Error trying to remove vm from group: ", e); + logger.warn("Error trying to remove vm from group: ", e); } } @@ -2653,8 +2651,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir securityGroupIdList.add(defaultGroup.getId()); } else { // create default security group for the account - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Couldn't find default security group for the account " + owner + " so creating a new one"); + if (logger.isDebugEnabled()) { + logger.debug("Couldn't find default security group for the account " + owner + " so creating a new one"); } defaultGroup = _securityGroupMgr.createSecurityGroup(SecurityGroupManager.DEFAULT_GROUP_NAME, SecurityGroupManager.DEFAULT_GROUP_DESCRIPTION, owner.getDomainId(), owner.getId(), owner.getAccountName()); @@ -2764,8 +2762,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir securityGroupIdList.add(defaultGroup.getId()); } else { // create default security group for the account - if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't find default security group for the account " + owner + " so creating a new one"); + if (logger.isDebugEnabled()) { + logger.debug("Couldn't find default security group for the account " + owner + " so creating a new one"); } defaultGroup = _securityGroupMgr.createSecurityGroup(SecurityGroupManager.DEFAULT_GROUP_NAME, SecurityGroupManager.DEFAULT_GROUP_DESCRIPTION, owner.getDomainId(), owner.getId(), owner.getAccountName()); @@ -2828,7 +2826,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - s_logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, 
null, null, true, null); if (newNetwork != null) { @@ -3082,7 +3080,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Long physicalNetworkId = _networkModel.findPhysicalNetworkId(zone.getId(), ntwkOffering.getTags(), ntwkOffering.getTrafficType()); if (physicalNetworkId == null) { - s_logger.warn("Network id " + network.getId() + " could not be streched to the zone " + zone.getId() + logger.warn("Network id " + network.getId() + " could not be streched to the zone " + zone.getId() + " as valid phyical network could not be found"); throw new InvalidParameterValueException("Network in which is VM getting deployed could not be" + " streched to the zone."); @@ -3090,7 +3088,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir String provider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.Connectivity); if (!_networkModel.isProviderEnabledInPhysicalNetwork(physicalNetworkId, provider)) { - s_logger.warn("Network id " + network.getId() + " could not be streched to the zone " +zone.getId() + logger.warn("Network id " + network.getId() + " could not be streched to the zone " +zone.getId() + " as Connectivity service provider is not enabled in the zone " + zone.getId()); throw new InvalidParameterValueException("Network in which is VM getting deployed could not be" + " streched to the zone."); @@ -3331,10 +3329,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("unsupported: rootdisksize override is smaller than template size " + templateVO.getSize() + "B (" + templateVOSizeGB + "GB)"); } else { - s_logger.debug("rootdisksize of " + (rootDiskSize << 30) + " was larger than template size of " + templateVO.getSize()); + logger.debug("rootdisksize of " + (rootDiskSize << 30) + " was larger than template size of " + templateVO.getSize()); } - s_logger.debug("found root disk size of " + rootDiskSize); + 
logger.debug("found root disk size of " + rootDiskSize); customParameters.remove("rootdisksize"); } @@ -3368,7 +3366,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir vm.setDetail("smc.present", "TRUE"); vm.setDetail(VmDetailConstants.ROOK_DISK_CONTROLLER, "scsi"); vm.setDetail("firmware", "efi"); - s_logger.info("guestOS is OSX : overwrite root disk controller to scsi, use smc and efi"); + logger.info("guestOS is OSX : overwrite root disk controller to scsi, use smc and efi"); } } @@ -3378,7 +3376,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } _vmDao.saveDetails(vm); - s_logger.debug("Allocating in the DB for vm"); + logger.debug("Allocating in the DB for vm"); DataCenterDeployment plan = new DataCenterDeployment(zone.getId()); List computeTags = new ArrayList(); @@ -3396,8 +3394,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan, rootDiskSize); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully allocated DB entry for " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Successfully allocated DB entry for " + vm); } CallContext.current().setEventDetails("Vm Id: " + vm.getId()); @@ -3557,10 +3555,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { answer = _agentMgr.send(hostId, cmd); } catch (OperationTimedoutException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); return false; } catch (AgentUnavailableException e) { - s_logger.warn("Agent Unavailable ", e); + logger.warn("Agent Unavailable ", e); return false; } @@ -3608,8 +3606,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Answer[] answersToCmds = cmds.getAnswers(); if (answersToCmds == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Returning from 
finalizeStart() since there are no answers to read"); + if (logger.isDebugEnabled()) { + logger.debug("Returning from finalizeStart() since there are no answers to read"); } return true; } @@ -3671,7 +3669,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir userVm.setPrivateIpAddress(guestNic.getIPv4Address()); _vmDao.update(userVm.getId(), userVm); - s_logger.info("Detected that ip changed in the answer, updated nic in the db with new ip " + returnedIp); + logger.info("Detected that ip changed in the answer, updated nic in the db with new ip " + returnedIp); } } @@ -3679,7 +3677,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { _rulesMgr.getSystemIpAndEnableStaticNatForVm(profile.getVirtualMachine(), false); } catch (Exception ex) { - s_logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); + logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); return false; } @@ -3738,7 +3736,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir assert (offering.getAssociatePublicIP() == true) : "User VM should not have system owned public IP associated with it when offering configured not to associate public IP."; _rulesMgr.disableStaticNat(ip.getId(), ctx.getCallingAccount(), ctx.getCallingUserId(), true); } catch (Exception ex) { - s_logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", ex); + logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", ex); } } @@ -3800,8 +3798,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (_securityGroupMgr.isVmSecurityGroupEnabled(vmId) && 
_securityGroupMgr.getSecurityGroupsForVm(vmId).isEmpty() && !_securityGroupMgr.isVmMappedToDefaultSecurityGroup(vmId) && _networkModel.canAddDefaultSecurityGroup()) { // if vm is not mapped to security group, create a mapping - if (s_logger.isDebugEnabled()) { - s_logger.debug("Vm " + vm + " is security group enabled, but not mapped to default security group; creating the mapping automatically"); + if (logger.isDebugEnabled()) { + logger.debug("Vm " + vm + " is security group enabled, but not mapped to default security group; creating the mapping automatically"); } SecurityGroup defaultSecurityGroup = _securityGroupMgr.getDefaultSecurityGroup(vm.getAccountId()); @@ -3814,7 +3812,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir DataCenterDeployment plan = null; if (destinationHost != null) { - s_logger.debug("Destination Host to deploy the VM is specified, specifying a deployment plan to deploy the VM"); + logger.debug("Destination Host to deploy the VM is specified, specifying a deployment plan to deploy the VM"); plan = new DataCenterDeployment(vm.getDataCenterId(), destinationHost.getPodId(), destinationHost.getClusterId(), destinationHost.getId(), null, null); } @@ -3896,7 +3894,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getState() == State.Destroyed || vm.getState() == State.Expunging) { - s_logger.trace("Vm id=" + vmId + " is already destroyed"); + logger.trace("Vm id=" + vmId + " is already destroyed"); return vm; } @@ -3943,7 +3941,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // support KVM only util 2013.06.25 if (!userVm.getHypervisorType().equals(HypervisorType.KVM)) return; - s_logger.debug("Collect vm disk statistics from host before stopping Vm"); + logger.debug("Collect vm disk statistics from host before stopping Vm"); long hostId = userVm.getHostId(); List vmNames = new ArrayList(); vmNames.add(userVm.getInstanceName()); 
@@ -3953,12 +3951,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { diskStatsAnswer = (GetVmDiskStatsAnswer)_agentMgr.easySend(hostId, new GetVmDiskStatsCommand(vmNames, host.getGuid(), host.getName())); } catch (Exception e) { - s_logger.warn("Error while collecting disk stats for vm: " + userVm.getInstanceName() + " from host: " + host.getName(), e); + logger.warn("Error while collecting disk stats for vm: " + userVm.getInstanceName() + " from host: " + host.getName(), e); return; } if (diskStatsAnswer != null) { if (!diskStatsAnswer.getResult()) { - s_logger.warn("Error while collecting disk stats vm: " + userVm.getInstanceName() + " from host: " + host.getName() + "; details: " + diskStatsAnswer.getDetails()); + logger.warn("Error while collecting disk stats vm: " + userVm.getInstanceName() + " from host: " + host.getName() + "; details: " + diskStatsAnswer.getDetails()); return; } try { @@ -3984,12 +3982,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir VmDiskStatisticsVO vmDiskStat_lock = _vmDiskStatsDao.lock(userVm.getAccountId(), userVm.getDataCenterId(), userVm.getId(), volume.getId()); if ((vmDiskStat.getIORead() == 0) && (vmDiskStat.getIOWrite() == 0) && (vmDiskStat.getBytesRead() == 0) && (vmDiskStat.getBytesWrite() == 0)) { - s_logger.debug("Read/Write of IO and Bytes are both 0. Not updating vm_disk_statistics"); + logger.debug("Read/Write of IO and Bytes are both 0. 
Not updating vm_disk_statistics"); continue; } if (vmDiskStat_lock == null) { - s_logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + " and volumeId:" + logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + " and volumeId:" + volume.getId()); continue; } @@ -3999,39 +3997,39 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir .getCurrentIOWrite()) || (previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) || (previousVmDiskStats .getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite())))) { - s_logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " IO Read: " + vmDiskStat.getIORead() + " IO Write: " + vmDiskStat.getIOWrite() + " Bytes Read: " + vmDiskStat.getBytesRead() + " Bytes Write: " + vmDiskStat.getBytesWrite()); continue; } if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Read # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + if (logger.isDebugEnabled()) { + logger.debug("Read # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . 
VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); } vmDiskStat_lock.setNetIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); } vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Write # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + if (logger.isDebugEnabled()) { + logger.debug("Write # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); } vmDiskStat_lock.setNetIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); } vmDiskStat_lock.setCurrentIOWrite(vmDiskStat.getIOWrite()); if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Read # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + if (logger.isDebugEnabled()) { + logger.debug("Read # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getBytesRead() + " Stored: " + vmDiskStat_lock.getCurrentBytesRead()); } vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); } vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Write # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. 
Host: " + host.getName() + if (logger.isDebugEnabled()) { + logger.debug("Write # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getBytesWrite() + " Stored: " + vmDiskStat_lock.getCurrentBytesWrite()); } @@ -4052,7 +4050,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } }); } catch (Exception e) { - s_logger.warn("Unable to update vm disk statistics for vm: " + userVm.getId() + " from host: " + hostId, e); + logger.warn("Unable to update vm disk statistics for vm: " + userVm.getId() + " from host: " + hostId, e); } } } @@ -4072,7 +4070,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getRemoved() != null) { - s_logger.trace("Vm id=" + vmId + " is already expunged"); + logger.trace("Vm id=" + vmId + " is already expunged"); return vm; } @@ -4129,8 +4127,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // access check - only root admin can migrate VM Account caller = CallContext.current().getCallingAccount(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the VM"); + if (logger.isDebugEnabled()) { + logger.debug("Caller is not a root admin, permission denied to migrate the VM"); } throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!"); } @@ -4199,8 +4197,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // access check - only root admin can migrate VM Account caller = CallContext.current().getCallingAccount(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the VM"); + if (logger.isDebugEnabled()) { + 
logger.debug("Caller is not a root admin, permission denied to migrate the VM"); } throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!"); } @@ -4211,8 +4209,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } // business logic if (vm.getState() != State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not Running, unable to migrate the vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is not Running, unable to migrate the vm " + vm); } InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, unable to migrate the vm with specified id"); ex.addProxyObject(vm.getUuid(), "vmId"); @@ -4227,8 +4225,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv) && !vm.getHypervisorType().equals(HypervisorType.LXC) && !vm.getHypervisorType().equals(HypervisorType.Simulator) && !vm.getHypervisorType().equals(HypervisorType.Ovm3)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM."); + if (logger.isDebugEnabled()) { + logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM."); } throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only"); } @@ -4238,8 +4236,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (isVMUsingLocalStorage(vm)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " is using Local Storage, cannot migrate this VM."); + if (logger.isDebugEnabled()) { + logger.debug(vm + " is using Local Storage, cannot migrate this VM."); } throw new InvalidParameterValueException("Unsupported operation, VM uses Local storage, cannot migrate"); } @@ -4278,8 
+4276,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // check max guest vm limit for the destinationHost HostVO destinationHostVO = _hostDao.findById(destinationHost.getId()); if (_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId() + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId() + " already has max Running VMs(count includes system VMs), cannot migrate to this host"); } throw new VirtualMachineMigrationException("Destination host, hostId: " + destinationHost.getId() @@ -4355,14 +4353,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir //raise an alert String msg = "VM is being migrated from a explicitly dedicated host " + srcHost.getName() + " to non-dedicated host " + destHost.getName(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } //if srcHost is non dedicated but destination Host is explicitly dedicated if (!srcExplDedicated && destExplDedicated) { //raise an alert String msg = "VM is being migrated from a non dedicated host " + srcHost.getName() + " to a explicitly dedicated host " + destHost.getName(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } //if hosts are dedicated to different account/domains, raise an alert @@ -4371,13 +4369,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir String msg = "VM is being migrated from host " + srcHost.getName() + " explicitly dedicated to account " + accountOfDedicatedHost(srcHost) + " to host " + destHost.getName() + " explicitly dedicated to account " + 
accountOfDedicatedHost(destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } if (!((domainOfDedicatedHost(srcHost) == null) || (domainOfDedicatedHost(srcHost).equals(domainOfDedicatedHost(destHost))))) { String msg = "VM is being migrated from host " + srcHost.getName() + " explicitly dedicated to domain " + domainOfDedicatedHost(srcHost) + " to host " + destHost.getName() + " explicitly dedicated to domain " + domainOfDedicatedHost(destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } } @@ -4415,7 +4413,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } else { //VM is not deployed using implicit planner, check if it migrated between dedicated hosts @@ -4438,12 +4436,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir msg = "VM is being migrated from implicitly dedicated host " + srcHost.getName() + " to shared host " + destHost.getName(); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } else { if (destImplDedicated) { msg = "VM is being migrated from shared host " + srcHost.getName() + " to implicitly dedicated host " + destHost.getName(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } } } @@ -4483,11 +4481,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return false; for (VMInstanceVO vm : allVmsOnHost) { if 
(!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId()) || vm.getAccountId() != accountId) { - s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + " than implicit, or running vms of other account"); + logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + " than implicit, or running vms of other account"); createdByImplicitStrict = false; break; } else if (isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId()) || vm.getAccountId() != accountId) { - s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" + " in preferred mode, or running vms of other account"); + logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" + " in preferred mode, or running vms of other account"); createdByImplicitStrict = false; break; } @@ -4499,7 +4497,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir boolean implicitPlannerUsed = false; ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(offeringId); if (offering == null) { - s_logger.error("Couldn't retrieve the offering by the given id : " + offeringId); + logger.error("Couldn't retrieve the offering by the given id : " + offeringId); } else { String plannerName = offering.getDeploymentPlanner(); if (plannerName != null) { @@ -4519,8 +4517,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Access check - only root administrator can migrate VM. 
Account caller = CallContext.current().getCallingAccount(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the VM"); + if (logger.isDebugEnabled()) { + logger.debug("Caller is not a root admin, permission denied to migrate the VM"); } throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!"); } @@ -4531,8 +4529,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getState() != State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not Running, unable to migrate the vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is not Running, unable to migrate the vm " + vm); } CloudRuntimeException ex = new CloudRuntimeException("VM is not Running, unable to migrate the vm with" + " specified id"); ex.addProxyObject(vm.getUuid(), "vmId"); @@ -4658,8 +4656,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("There is no vm by that id " + cmd.getVmId()); } else if (vm.getState() == State.Running) { // VV 3: check if vm is // running - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is Running, unable to move the vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is Running, unable to move the vm " + vm); } InvalidParameterValueException ex = new InvalidParameterValueException("VM is Running, unable to move the vm with specified vmId"); ex.addProxyObject(vm.getUuid(), "vmId"); @@ -4846,8 +4844,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } else { // create default security group for the account - if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't find default security group for the account " + newAccount + " so creating a new one"); + if (logger.isDebugEnabled()) { + logger.debug("Couldn't find default security 
group for the account " + newAccount + " so creating a new one"); } defaultGroup = _securityGroupMgr.createSecurityGroup(SecurityGroupManager.DEFAULT_GROUP_NAME, SecurityGroupManager.DEFAULT_GROUP_DESCRIPTION, newAccount.getDomainId(), newAccount.getId(), newAccount.getAccountName()); @@ -4866,7 +4864,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _securityGroupMgr.addInstanceToGroups(vm.getId(), securityGroupIdList); - s_logger.debug("AssignVM: Basic zone, adding security groups no " + securityGroupIdList.size() + " to " + vm.getInstanceName()); + logger.debug("AssignVM: Basic zone, adding security groups no " + securityGroupIdList.size() + " to " + vm.getInstanceName()); } else { if (zone.isSecurityGroupEnabled()) { throw new InvalidParameterValueException("Not yet implemented for SecurityGroupEnabled advanced networks."); @@ -4920,7 +4918,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - s_logger.debug("Creating network for account " + newAccount + " from the network offering id=" + requiredOfferings.get(0).getId() + logger.debug("Creating network for account " + newAccount + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), newAccount.getAccountName() + "-network", newAccount.getAccountName() + "-network", null, null, null, null, newAccount, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, @@ -4929,17 +4927,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (requiredOfferings.get(0).getIsPersistent()) { DeployDestination dest = new DeployDestination(zone, null, null, null); UserVO callerUser = 
_userDao.findById(CallContext.current().getCallingUserId()); - Journal journal = new Journal.LogJournal("Implementing " + newNetwork, s_logger); + Journal journal = new Journal.LogJournal("Implementing " + newNetwork, logger); ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, caller); - s_logger.debug("Implementing the network for account" + newNetwork + " as a part of" + " network provision for persistent networks"); + logger.debug("Implementing the network for account" + newNetwork + " as a part of" + " network provision for persistent networks"); try { Pair implementedNetwork = _networkMgr.implementNetwork(newNetwork.getId(), dest, context); if (implementedNetwork == null || implementedNetwork.first() == null) { - s_logger.warn("Failed to implement the network " + newNetwork); + logger.warn("Failed to implement the network " + newNetwork); } newNetwork = implementedNetwork.second(); } catch (Exception ex) { - s_logger.warn("Failed to implement network " + newNetwork + " elements and" + logger.warn("Failed to implement network " + newNetwork + " elements and" + " resources as a part of network provision for persistent network due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network" + " (with specified id) elements and resources as a part of network provision"); @@ -4975,10 +4973,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir VirtualMachine vmi = _itMgr.findById(vm.getId()); VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmi); _networkMgr.allocate(vmProfile, networks); - s_logger.debug("AssignVM: Advance virtual, adding networks no " + networks.size() + " to " + vm.getInstanceName()); + logger.debug("AssignVM: Advance virtual, adding networks no " + networks.size() + " to " + vm.getInstanceName()); } // END IF NON SEC GRP ENABLED } // END IF ADVANCED - s_logger.info("AssignVM: vm " + vm.getInstanceName() + " 
now belongs to account " + cmd.getAccountName()); + logger.info("AssignVM: vm " + vm.getInstanceName() + " now belongs to account " + cmd.getAccountName()); return vm; } @@ -5089,7 +5087,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { _itMgr.stop(vm.getUuid()); } catch (ResourceUnavailableException e) { - s_logger.debug("Stop vm " + vm.getUuid() + " failed", e); + logger.debug("Stop vm " + vm.getUuid() + " failed", e); CloudRuntimeException ex = new CloudRuntimeException("Stop vm failed for specified vmId"); ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; @@ -5134,12 +5132,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vm.getHypervisorType() == HypervisorType.VMware) { VolumeInfo volumeInStorage = volFactory.getVolume(root.getId()); if (volumeInStorage != null) { - s_logger.info("Expunging volume " + root.getId() + " from primary data store"); + logger.info("Expunging volume " + root.getId() + " from primary data store"); AsyncCallFuture future = _volService.expungeVolumeAsync(volFactory.getVolume(root.getId())); try { future.get(); } catch (Exception e) { - s_logger.debug("Failed to expunge volume:" + root.getId(), e); + logger.debug("Failed to expunge volume:" + root.getId(), e); } } } @@ -5184,7 +5182,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } } catch (Exception e) { - s_logger.debug("Unable to start VM " + vm.getUuid(), e); + logger.debug("Unable to start VM " + vm.getUuid(), e); CloudRuntimeException ex = new CloudRuntimeException("Unable to start VM with specified id" + e.getMessage()); ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; @@ -5192,7 +5190,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - s_logger.debug("Restore VM " + vmId + " done successfully"); + logger.debug("Restore VM " + vmId + " done successfully"); return vm; } @@ -5260,7 +5258,7 @@ public class 
UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (!cmds.isSuccessful()) { for (Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { - s_logger.warn("Failed to reset vm due to: " + answer.getDetails()); + logger.warn("Failed to reset vm due to: " + answer.getDetails()); throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails()); } @@ -5285,7 +5283,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir String sshPublicKey = vm.getDetail("SSH.PublicKey"); if (sshPublicKey != null && !sshPublicKey.equals("") && password != null && !password.equals("saved_password")) { if (!sshPublicKey.startsWith("ssh-rsa")) { - s_logger.warn("Only RSA public keys can be used to encrypt a vm password."); + logger.warn("Only RSA public keys can be used to encrypt a vm password."); return; } String encryptedPasswd = RSAHelper.encryptWithSSHPublicKey(sshPublicKey, password); diff --git a/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index 2dcd2e27b5a..5d39e0d5a29 100644 --- a/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -27,7 +27,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd; import org.apache.cloudstack.context.CallContext; @@ -102,7 +101,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Component @Local(value = { VMSnapshotManager.class, VMSnapshotService.class }) public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotManager, VMSnapshotService, VmWorkJobHandler { - private static final Logger s_logger = Logger.getLogger(VMSnapshotManagerImpl.class); public static final String VM_WORK_JOB_HANDLER = 
VMSnapshotManagerImpl.class.getSimpleName(); @@ -341,7 +339,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana return vmSnapshot; } catch (Exception e) { String msg = e.getMessage(); - s_logger.error("Create vm snapshot record failed for vm: " + vmId + " due to: " + msg); + logger.error("Create vm snapshot record failed for vm: " + vmId + " due to: " + msg); } return null; } @@ -437,7 +435,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana VMSnapshot snapshot = strategy.takeVMSnapshot(vmSnapshot); return snapshot; } catch (Exception e) { - s_logger.debug("Failed to create vm snapshot: " + vmSnapshotId, e); + logger.debug("Failed to create vm snapshot: " + vmSnapshotId, e); return null; } } @@ -474,7 +472,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana if (hasActiveVMSnapshotTasks(vmSnapshot.getVmId())) { List expungingSnapshots = _vmSnapshotDao.listByInstanceId(vmSnapshot.getVmId(), VMSnapshot.State.Expunging); if (expungingSnapshots.size() > 0 && expungingSnapshots.get(0).getId() == vmSnapshot.getId()) - s_logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); + logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); else throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } @@ -536,7 +534,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana if (hasActiveVMSnapshotTasks(vmSnapshot.getVmId())) { List expungingSnapshots = _vmSnapshotDao.listByInstanceId(vmSnapshot.getVmId(), VMSnapshot.State.Expunging); if (expungingSnapshots.size() > 0 && expungingSnapshots.get(0).getId() == vmSnapshot.getId()) - s_logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); + logger.debug("Target VM 
snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); else throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } @@ -548,7 +546,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana VMSnapshotStrategy strategy = findVMSnapshotStrategy(vmSnapshot); return strategy.deleteVMSnapshot(vmSnapshot); } catch (Exception e) { - s_logger.debug("Failed to delete vm snapshot: " + vmSnapshotId, e); + logger.debug("Failed to delete vm snapshot: " + vmSnapshotId, e); return false; } } @@ -684,7 +682,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana vm = _userVMDao.findById(userVm.getId()); hostId = vm.getHostId(); } catch (Exception e) { - s_logger.error("Start VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); + logger.error("Start VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); throw new CloudRuntimeException(e.getMessage()); } } else { @@ -692,7 +690,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana try { _itMgr.advanceStop(userVm.getUuid(), true); } catch (Exception e) { - s_logger.error("Stop VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); + logger.error("Stop VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); throw new CloudRuntimeException(e.getMessage()); } } @@ -708,7 +706,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana strategy.revertVMSnapshot(vmSnapshotVo); return userVm; } catch (Exception e) { - s_logger.debug("Failed to revert vmsnapshot: " + vmSnapshotId, e); + logger.debug("Failed to revert vmsnapshot: " + vmSnapshotId, e); throw new CloudRuntimeException(e.getMessage()); } } @@ -812,7 +810,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements 
VMSnapshotMana } } } catch (Exception e) { - s_logger.error(e.getMessage(), e); + logger.error(e.getMessage(), e); if (_vmSnapshotDao.listByInstanceId(vm.getId(), VMSnapshot.State.Expunging).size() == 0) return true; else diff --git a/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java b/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java index d25bddb4398..ee2c6917475 100644 --- a/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java +++ b/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import com.cloud.utils.fsm.StateMachine2; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; @@ -73,7 +72,6 @@ import com.cloud.vm.dao.UserVmDao; @Local(value = {AffinityGroupService.class}) public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGroupService, Manager, StateListener { - public static final Logger s_logger = Logger.getLogger(AffinityGroupServiceImpl.class); private String _name; @Inject @@ -230,8 +228,8 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro } }); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created affinity group =" + affinityGroupName); + if (logger.isDebugEnabled()) { + logger.debug("Created affinity group =" + affinityGroupName); } return group; @@ -307,8 +305,8 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro Pair, Long> params = new Pair, Long>(AffinityGroup.class, affinityGroupIdFinal); _messageBus.publish(_name, EntityManager.MESSAGE_REMOVE_ENTITY_EVENT, PublishScope.LOCAL, params); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleted affinity group id=" + affinityGroupId); + if (logger.isDebugEnabled()) { + logger.debug("Deleted affinity group id=" + affinityGroupId); } return true; 
} @@ -465,7 +463,7 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro // Check that the VM is stopped if (!vmInstance.getState().equals(State.Stopped)) { - s_logger.warn("Unable to update affinity groups of the virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState()); + logger.warn("Unable to update affinity groups of the virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState()); throw new InvalidParameterValueException("Unable update affinity groups of the virtual machine " + vmInstance.toString() + " " + "in state " + vmInstance.getState() + "; make sure the virtual machine is stopped and not in an error state before updating."); } @@ -491,8 +489,8 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro } } _affinityGroupVMMapDao.updateMap(vmId, affinityGroupIds); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updated VM :" + vmId + " affinity groups to =" + affinityGroupIds); + if (logger.isDebugEnabled()) { + logger.debug("Updated VM :" + vmId + " affinity groups to =" + affinityGroupIds); } // APIResponseHelper will pull out the updated affinitygroups. 
return vmInstance; diff --git a/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java b/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java index bc2f1c4da47..c912981c81d 100644 --- a/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java +++ b/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java @@ -24,7 +24,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -81,7 +80,6 @@ import com.cloud.utils.net.NetUtils; @Component @Local(value = {ApplicationLoadBalancerService.class}) public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements ApplicationLoadBalancerService { - private static final Logger s_logger = Logger.getLogger(ApplicationLoadBalancerManagerImpl.class); @Inject NetworkModel _networkModel; @@ -184,7 +182,7 @@ public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements A if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - s_logger.debug("Load balancer " + newRule.getId() + " for Ip address " + newRule.getSourceIp().addr() + ", source port " + + logger.debug("Load balancer " + newRule.getId() + " for Ip address " + newRule.getSourceIp().addr() + ", source port " + newRule.getSourcePortStart().intValue() + ", instance port " + newRule.getDefaultPortStart() + " is added successfully."); CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); Network ntwk = _networkModel.getNetwork(newRule.getNetworkId()); @@ -261,7 +259,7 @@ public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements A if (requestedIp != null) { if (_lbDao.countBySourceIp(new Ip(requestedIp), sourceIpNtwk.getId()) > 0) { - s_logger.debug("IP 
address " + requestedIp + " is already used by existing LB rule, returning it"); + logger.debug("IP address " + requestedIp + " is already used by existing LB rule, returning it"); return new Ip(requestedIp); } @@ -532,8 +530,8 @@ public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements A } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No network rule conflicts detected for " + newLbRule + " against " + (lbRules.size() - 1) + " existing rules"); + if (logger.isDebugEnabled()) { + logger.debug("No network rule conflicts detected for " + newLbRule + " against " + (lbRules.size() - 1) + " existing rules"); } } diff --git a/server/src/org/apache/cloudstack/region/RegionManagerImpl.java b/server/src/org/apache/cloudstack/region/RegionManagerImpl.java index 89107141966..ae8bc4bf2f5 100644 --- a/server/src/org/apache/cloudstack/region/RegionManagerImpl.java +++ b/server/src/org/apache/cloudstack/region/RegionManagerImpl.java @@ -25,7 +25,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; @@ -50,7 +49,6 @@ import com.cloud.utils.db.DbProperties; @Component @Local(value = {RegionManager.class}) public class RegionManagerImpl extends ManagerBase implements RegionManager, Manager { - public static final Logger s_logger = Logger.getLogger(RegionManagerImpl.class); @Inject RegionDao _regionDao; diff --git a/server/src/org/apache/cloudstack/region/RegionServiceImpl.java b/server/src/org/apache/cloudstack/region/RegionServiceImpl.java index 98cf5005f89..03eb217014f 100644 --- a/server/src/org/apache/cloudstack/region/RegionServiceImpl.java +++ b/server/src/org/apache/cloudstack/region/RegionServiceImpl.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import 
org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.account.DeleteAccountCmd; @@ -49,7 +48,6 @@ import com.cloud.utils.component.ManagerBase; @Component @Local(value = {RegionService.class}) public class RegionServiceImpl extends ManagerBase implements RegionService, Manager { - public static final Logger s_logger = Logger.getLogger(RegionServiceImpl.class); @Inject private RegionManager _regionMgr; diff --git a/server/test/com/cloud/consoleproxy/ConsoleProxyManagerTest.java b/server/test/com/cloud/consoleproxy/ConsoleProxyManagerTest.java index fd12d23a525..f9adecedb39 100644 --- a/server/test/com/cloud/consoleproxy/ConsoleProxyManagerTest.java +++ b/server/test/com/cloud/consoleproxy/ConsoleProxyManagerTest.java @@ -43,6 +43,7 @@ public class ConsoleProxyManagerTest { public void setup() throws Exception { MockitoAnnotations.initMocks(this); ReflectionTestUtils.setField(cpvmManager, "_allocProxyLock", globalLock); + ReflectionTestUtils.setField(cpvmManager, "logger", Logger.getLogger(ConsoleProxyManagerImpl.class)); Mockito.doCallRealMethod().when(cpvmManager).expandPool(Mockito.anyLong(), Mockito.anyObject()); } diff --git a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java index 111df93b2b0..7441d5751f8 100644 --- a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.command.user.network.ListNetworksCmd; import org.apache.cloudstack.api.command.user.network.RestartNetworkCmd; import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DataCenterDeployment; @@ -93,7 +92,6 @@ public class MockNetworkManagerImpl extends ManagerBase 
implements NetworkOrches List _networkElements; private static HashMap s_providerToNetworkElementMap = new HashMap(); - private static final Logger s_logger = Logger.getLogger(MockNetworkManagerImpl.class); /* (non-Javadoc) * @see com.cloud.utils.component.Manager#start() @@ -104,7 +102,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches Provider implementedProvider = element.getProvider(); if (implementedProvider != null) { if (s_providerToNetworkElementMap.containsKey(implementedProvider.getName())) { - s_logger.error("Cannot start MapNetworkManager: Provider <-> NetworkElement must be a one-to-one map, " + + logger.error("Cannot start MapNetworkManager: Provider <-> NetworkElement must be a one-to-one map, " + "multiple NetworkElements found for Provider: " + implementedProvider.getName()); return false; } diff --git a/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java index 7ea0a80167d..07b067d6525 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.network.Network; import com.cloud.network.Network.GuestType; @@ -37,7 +36,6 @@ import com.cloud.utils.db.DB; @Local(value = NetworkOfferingDao.class) @DB() public class MockNetworkOfferingDaoImpl extends NetworkOfferingDaoImpl implements NetworkOfferingDao { - private static final Logger s_logger = Logger.getLogger(MockNetworkOfferingDaoImpl.class); /* (non-Javadoc) * @see com.cloud.offerings.dao.NetworkOfferingDao#findByUniqueName(java.lang.String) @@ -144,10 +142,10 @@ public class MockNetworkOfferingDaoImpl extends NetworkOfferingDaoImpl implement f.setAccessible(true); f.setLong(voToReturn, id); } catch (NoSuchFieldException ex) { - s_logger.warn(ex); + logger.warn(ex); return null; } catch 
(IllegalAccessException ex) { - s_logger.warn(ex); + logger.warn(ex); return null; } diff --git a/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java b/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java index a436a92a382..3220979f7de 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java @@ -22,7 +22,6 @@ import java.util.Map; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.Vpc.State; @@ -34,7 +33,6 @@ import com.cloud.utils.db.GenericDaoBase; @Local(value = VpcDao.class) @DB() public class MockVpcDaoImpl extends GenericDaoBase implements VpcDao { - private static final Logger s_logger = Logger.getLogger(MockNetworkOfferingDaoImpl.class); /* (non-Javadoc) * @see com.cloud.network.vpc.Dao.VpcDao#getVpcCountByOfferingId(long) @@ -117,10 +115,10 @@ public class MockVpcDaoImpl extends GenericDaoBase implements VpcDa f.setAccessible(true); f.setLong(voToReturn, id); } catch (NoSuchFieldException ex) { - s_logger.warn(ex); + logger.warn(ex); return null; } catch (IllegalAccessException ex) { - s_logger.warn(ex); + logger.warn(ex); return null; } diff --git a/services/secondary-storage/controller/src/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index dd81809081a..af9f3c7da78 100644 --- a/services/secondary-storage/controller/src/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -44,7 +44,6 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.utils.identity.ManagementServerNode; 
-import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -164,7 +163,6 @@ import com.cloud.vm.dao.VMInstanceDao; @Local(value = {SecondaryStorageVmManager.class}) public class SecondaryStorageManagerImpl extends ManagerBase implements SecondaryStorageVmManager, VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(SecondaryStorageManagerImpl.class); private static final int DEFAULT_CAPACITY_SCAN_INTERVAL = 30000; // 30 // seconds @@ -262,16 +260,16 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar _itMgr.advanceStart(secStorageVm.getUuid(), null, null); return _secStorageVmDao.findById(secStorageVm.getId()); } catch (StorageUnavailableException e) { - s_logger.warn("Exception while trying to start secondary storage vm", e); + logger.warn("Exception while trying to start secondary storage vm", e); return null; } catch (InsufficientCapacityException e) { - s_logger.warn("Exception while trying to start secondary storage vm", e); + logger.warn("Exception while trying to start secondary storage vm", e); return null; } catch (ResourceUnavailableException e) { - s_logger.warn("Exception while trying to start secondary storage vm", e); + logger.warn("Exception while trying to start secondary storage vm", e); return null; } catch (Exception e) { - s_logger.warn("Exception while trying to start secondary storage vm", e); + logger.warn("Exception while trying to start secondary storage vm", e); return null; } } @@ -291,7 +289,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar SecondaryStorageVmVO secStorageVm = _secStorageVmDao.findByInstanceName(cssHost.getName()); if (secStorageVm == null) { - s_logger.warn("secondary storage VM " + cssHost.getName() + " doesn't exist"); + logger.warn("secondary storage VM " + cssHost.getName() + " doesn't exist"); return false; } @@ -322,12 +320,12 
@@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar svo.setParent(an.get_dir()); _imageStoreDao.update(ssStore.getId(), svo); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully programmed secondary storage " + ssStore.getName() + " in secondary storage VM " + secStorageVm.getInstanceName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully programmed secondary storage " + ssStore.getName() + " in secondary storage VM " + secStorageVm.getInstanceName()); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully programmed secondary storage " + ssStore.getName() + " in secondary storage VM " + secStorageVm.getInstanceName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully programmed secondary storage " + ssStore.getName() + " in secondary storage VM " + secStorageVm.getInstanceName()); } return false; } @@ -342,12 +340,12 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar HostVO host = _resourceMgr.findHostByName(ssVm.getInstanceName()); Answer answer = _agentMgr.easySend(host.getId(), setupCmd); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully programmed secondary storage " + host.getName() + " in secondary storage VM " + ssVm.getInstanceName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully programmed secondary storage " + host.getName() + " in secondary storage VM " + ssVm.getInstanceName()); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully programmed secondary storage " + host.getName() + " in secondary storage VM " + ssVm.getInstanceName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully programmed secondary storage " + host.getName() + " in secondary storage VM " + ssVm.getInstanceName()); } return false; } @@ -365,7 +363,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar } 
SecondaryStorageVmVO secStorageVm = _secStorageVmDao.findByInstanceName(ssAHost.getName()); if (secStorageVm == null) { - s_logger.warn("secondary storage VM " + ssAHost.getName() + " doesn't exist"); + logger.warn("secondary storage VM " + ssAHost.getName() + " doesn't exist"); return false; } @@ -385,13 +383,13 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar setupCmd.setCopyUserName(TemplateConstants.DEFAULT_HTTP_AUTH_USER); Answer answer = _agentMgr.easySend(ssAHostId, setupCmd); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully programmed http auth into " + secStorageVm.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully programmed http auth into " + secStorageVm.getHostName()); } return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("failed to program http auth into secondary storage vm : " + secStorageVm.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("failed to program http auth into secondary storage vm : " + secStorageVm.getHostName()); } return false; } @@ -411,7 +409,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar SecondaryStorageVmVO thisSecStorageVm = _secStorageVmDao.findByInstanceName(ssAHost.getName()); if (thisSecStorageVm == null) { - s_logger.warn("secondary storage VM " + ssAHost.getName() + " doesn't exist"); + logger.warn("secondary storage VM " + ssAHost.getName() + " doesn't exist"); return false; } @@ -429,12 +427,12 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar } Answer answer = _agentMgr.easySend(ssvm.getId(), thiscpc); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully programmed firewall rules into SSVM " + ssvm.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully programmed firewall rules into SSVM " + ssvm.getName()); } } else 
{ - if (s_logger.isDebugEnabled()) { - s_logger.debug("failed to program firewall rules into secondary storage vm : " + ssvm.getName()); + if (logger.isDebugEnabled()) { + logger.debug("failed to program firewall rules into secondary storage vm : " + ssvm.getName()); } return false; } @@ -450,12 +448,12 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar Answer answer = _agentMgr.easySend(ssAHostId, allSSVMIpList); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully programmed firewall rules into " + thisSecStorageVm.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully programmed firewall rules into " + thisSecStorageVm.getHostName()); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("failed to program firewall rules into secondary storage vm : " + thisSecStorageVm.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("failed to program firewall rules into secondary storage vm : " + thisSecStorageVm.getHostName()); } return false; } @@ -477,21 +475,21 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar public SecondaryStorageVmVO startNew(long dataCenterId, SecondaryStorageVm.Role role) { if (!isSecondaryStorageVmRequired(dataCenterId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Secondary storage vm not required in zone " + dataCenterId + " acc. to zone config"); + if (logger.isDebugEnabled()) { + logger.debug("Secondary storage vm not required in zone " + dataCenterId + " acc. 
to zone config"); } return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assign secondary storage vm from a newly started instance for request from data center : " + dataCenterId); + if (logger.isDebugEnabled()) { + logger.debug("Assign secondary storage vm from a newly started instance for request from data center : " + dataCenterId); } Map context = createSecStorageVmInstance(dataCenterId, role); long secStorageVmId = (Long)context.get("secStorageVmId"); if (secStorageVmId == 0) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Creating secondary storage vm instance failed, data center id : " + dataCenterId); + if (logger.isTraceEnabled()) { + logger.trace("Creating secondary storage vm instance failed, data center id : " + dataCenterId); } return null; @@ -505,8 +503,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar new SecStorageVmAlertEventArgs(SecStorageVmAlertEventArgs.SSVM_CREATED, dataCenterId, secStorageVmId, secStorageVm, null)); return secStorageVm; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to allocate secondary storage vm storage, remove the secondary storage vm record from DB, secondary storage vm id: " + + if (logger.isDebugEnabled()) { + logger.debug("Unable to allocate secondary storage vm storage, remove the secondary storage vm record from DB, secondary storage vm id: " + secStorageVmId); } SubscriptionMgr.getInstance().notifySubscribers(ALERT_SUBJECT, this, @@ -519,7 +517,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar DataStore secStore = _dataStoreMgr.getImageStore(dataCenterId); if (secStore == null) { String msg = "No secondary storage available in zone " + dataCenterId + ", cannot create secondary storage vm"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -565,7 +563,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar 
networks.put(_networkMgr.setupNetwork(systemAcct, offering, plan, null, null, false).get(0), new ArrayList()); } } catch (ConcurrentOperationException e) { - s_logger.info("Unable to setup due to concurrent operation. " + e); + logger.info("Unable to setup due to concurrent operation. " + e); return new HashMap(); } @@ -589,7 +587,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar _itMgr.allocate(name, template, serviceOffering, networks, plan, null); secStorageVm = _secStorageVmDao.findById(secStorageVm.getId()); } catch (InsufficientCapacityException e) { - s_logger.warn("InsufficientCapacity", e); + logger.warn("InsufficientCapacity", e); throw new CloudRuntimeException("Insufficient capacity exception", e); } @@ -614,18 +612,18 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar public SecondaryStorageVmVO assignSecStorageVmFromRunningPool(long dataCenterId, SecondaryStorageVm.Role role) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Assign secondary storage vm from running pool for request from data center : " + dataCenterId); + if (logger.isTraceEnabled()) { + logger.trace("Assign secondary storage vm from running pool for request from data center : " + dataCenterId); } SecondaryStorageVmAllocator allocator = getCurrentAllocator(); assert (allocator != null); List runningList = _secStorageVmDao.getSecStorageVmListInStates(role, dataCenterId, State.Running); if (runningList != null && runningList.size() > 0) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Running secondary storage vm pool size : " + runningList.size()); + if (logger.isTraceEnabled()) { + logger.trace("Running secondary storage vm pool size : " + runningList.size()); for (SecondaryStorageVmVO secStorageVm : runningList) { - s_logger.trace("Running secStorageVm instance : " + secStorageVm.getHostName()); + logger.trace("Running secStorageVm instance : " + secStorageVm.getHostName()); } } @@ -633,8 +631,8 @@ public 
class SecondaryStorageManagerImpl extends ManagerBase implements Secondar return allocator.allocSecondaryStorageVm(runningList, loadInfo, dataCenterId); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Empty running secStorageVm pool for now in data center : " + dataCenterId); + if (logger.isTraceEnabled()) { + logger.trace("Empty running secStorageVm pool for now in data center : " + dataCenterId); } } return null; @@ -650,13 +648,13 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar } private void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Allocate secondary storage vm standby capacity for data center : " + dataCenterId); + if (logger.isTraceEnabled()) { + logger.trace("Allocate secondary storage vm standby capacity for data center : " + dataCenterId); } if (!isSecondaryStorageVmRequired(dataCenterId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Secondary storage vm not required in zone " + dataCenterId + " according to zone config"); + if (logger.isDebugEnabled()) { + logger.debug("Secondary storage vm not required in zone " + dataCenterId + " according to zone config"); } return; } @@ -666,8 +664,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar boolean secStorageVmFromStoppedPool = false; secStorageVm = assignSecStorageVmFromStoppedPool(dataCenterId, role); if (secStorageVm == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("No stopped secondary storage vm is available, need to allocate a new secondary storage vm"); + if (logger.isInfoEnabled()) { + logger.info("No stopped secondary storage vm is available, need to allocate a new secondary storage vm"); } if (_allocLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC)) { @@ -680,14 +678,14 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar _allocLock.unlock(); } } else { - if (s_logger.isInfoEnabled()) { - 
s_logger.info("Unable to acquire synchronization lock for secondary storage vm allocation, wait for next scan"); + if (logger.isInfoEnabled()) { + logger.info("Unable to acquire synchronization lock for secondary storage vm allocation, wait for next scan"); } return; } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Found a stopped secondary storage vm, starting it. Vm id : " + secStorageVm.getId()); + if (logger.isInfoEnabled()) { + logger.info("Found a stopped secondary storage vm, starting it. Vm id : " + secStorageVm.getId()); } secStorageVmFromStoppedPool = true; } @@ -703,8 +701,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar secStorageVmLock.unlock(); } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to acquire synchronization lock for starting secondary storage vm id : " + secStorageVm.getId()); + if (logger.isInfoEnabled()) { + logger.info("Unable to acquire synchronization lock for starting secondary storage vm id : " + secStorageVm.getId()); } return; } @@ -713,8 +711,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar } if (secStorageVm == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to start secondary storage vm for standby capacity, vm id : " + secStorageVmId + ", will recycle it and start a new one"); + if (logger.isInfoEnabled()) { + logger.info("Unable to start secondary storage vm for standby capacity, vm id : " + secStorageVmId + ", will recycle it and start a new one"); } if (secStorageVmFromStoppedPool) { @@ -723,8 +721,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar } else { SubscriptionMgr.getInstance().notifySubscribers(ALERT_SUBJECT, this, new SecStorageVmAlertEventArgs(SecStorageVmAlertEventArgs.SSVM_UP, dataCenterId, secStorageVmId, secStorageVm, null)); - if (s_logger.isInfoEnabled()) { - s_logger.info("Secondary storage vm " + secStorageVm.getHostName() + " is started"); + if 
(logger.isInfoEnabled()) { + logger.info("Secondary storage vm " + secStorageVm.getHostName() + " is started"); } } } @@ -745,22 +743,22 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar if (zoneHostInfo != null && (zoneHostInfo.getFlags() & RunningHostInfoAgregator.ZoneHostInfo.ROUTING_HOST_MASK) != 0) { VMTemplateVO template = _templateDao.findSystemVMReadyTemplate(dataCenterId, HypervisorType.Any); if (template == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("System vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch secondary storage vm"); + if (logger.isDebugEnabled()) { + logger.debug("System vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch secondary storage vm"); } return false; } List stores = _dataStoreMgr.getImageStoresByScope(new ZoneScope(dataCenterId)); if (stores.size() < 1) { - s_logger.debug("No image store added in zone " + dataCenterId + ", wait until it is ready to launch secondary storage vm"); + logger.debug("No image store added in zone " + dataCenterId + ", wait until it is ready to launch secondary storage vm"); return false; } DataStore store = templateMgr.getImageStore(dataCenterId, template.getId()); if (store == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No secondary storage available in zone " + dataCenterId + ", wait until it is ready to launch secondary storage vm"); + if (logger.isDebugEnabled()) { + logger.debug("No secondary storage available in zone " + dataCenterId + ", wait until it is ready to launch secondary storage vm"); } return false; } @@ -774,8 +772,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar if (l != null && l.size() > 0 && l.get(0).second().intValue() > 0) { return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Primary storage is not ready, wait until it is ready to launch secondary storage vm. 
dcId: " + dataCenterId + + if (logger.isDebugEnabled()) { + logger.debug("Primary storage is not ready, wait until it is ready to launch secondary storage vm. dcId: " + dataCenterId + ", " + ConfigurationManagerImpl.SystemVMUseLocalStorage.key() + ": " + useLocalStorage + ". " + "If you want to use local storage to start SSVM, need to set " + ConfigurationManagerImpl.SystemVMUseLocalStorage.key() + " to true"); } @@ -801,8 +799,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar @Override public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start secondary storage vm manager"); + if (logger.isInfoEnabled()) { + logger.info("Start secondary storage vm manager"); } return true; @@ -818,8 +816,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring secondary storage vm manager : " + name); + if (logger.isInfoEnabled()) { + logger.info("Start configuring secondary storage vm manager : " + name); } Map configs = _configDao.getConfiguration("management-server", params); @@ -839,7 +837,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar //default to HTTP in case of missing domain String ssvmUrlDomain = _configDao.getValue("secstorage.ssl.cert.domain"); if(_useSSlCopy && (ssvmUrlDomain == null || ssvmUrlDomain.isEmpty())){ - s_logger.warn("Empty secondary storage url domain, explicitly disabling SSL"); + logger.warn("Empty secondary storage url domain, explicitly disabling SSL"); _useSSlCopy = false; } @@ -871,11 +869,11 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar try { _serviceOffering = _offeringDao.findById(Long.parseLong(ssvmSrvcOffIdStr)); } catch (NumberFormatException ex) { - s_logger.debug("The system service offering specified by global config is 
not id, but uuid=" + ssvmSrvcOffIdStr + " for secondary storage vm"); + logger.debug("The system service offering specified by global config is not id, but uuid=" + ssvmSrvcOffIdStr + " for secondary storage vm"); } } if (_serviceOffering == null) { - s_logger.warn("Can't find system service offering specified by global config, uuid=" + ssvmSrvcOffIdStr + " for secondary storage vm"); + logger.warn("Can't find system service offering specified by global config, uuid=" + ssvmSrvcOffIdStr + " for secondary storage vm"); } } @@ -888,7 +886,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar // this can sometimes happen, if DB is manually or programmatically manipulated if (offerings == null || offerings.size() < 2) { String msg = "Data integrity problem : System Offering For Secondary Storage VM has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } @@ -917,13 +915,13 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar errMsg = e.toString(); } finally { if (!valid) { - s_logger.debug("ssvm http proxy " + _httpProxy + " is invalid: " + errMsg); + logger.debug("ssvm http proxy " + _httpProxy + " is invalid: " + errMsg); throw new ConfigurationException("ssvm http proxy " + _httpProxy + "is invalid: " + errMsg); } } } - if (s_logger.isInfoEnabled()) { - s_logger.info("Secondary storage vm Manager is configured."); + if (logger.isInfoEnabled()) { + logger.info("Secondary storage vm Manager is configured."); } _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; @@ -934,8 +932,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar SecondaryStorageVmVO secStorageVm = _secStorageVmDao.findById(secStorageVmId); if (secStorageVm == null) { String msg = "Stopping secondary storage vm failed: secondary storage vm " + secStorageVmId + " no longer exists"; - if (s_logger.isDebugEnabled()) { - 
s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } return false; } @@ -952,7 +950,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar } } else { String msg = "Unable to acquire secondary storage vm lock : " + secStorageVm.toString(); - s_logger.debug(msg); + logger.debug(msg); return false; } } finally { @@ -963,8 +961,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar // vm was already stopped, return true return true; } catch (ResourceUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Stopping secondary storage vm " + secStorageVm.getHostName() + " faled : exception " + e.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Stopping secondary storage vm " + secStorageVm.getHostName() + " faled : exception " + e.toString()); } return false; } @@ -983,8 +981,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar final Answer answer = _agentMgr.easySend(secStorageVm.getHostId(), cmd); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully reboot secondary storage vm " + secStorageVm.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully reboot secondary storage vm " + secStorageVm.getHostName()); } SubscriptionMgr.getInstance().notifySubscribers(ALERT_SUBJECT, this, @@ -993,8 +991,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar return true; } else { String msg = "Rebooting Secondary Storage VM failed - " + secStorageVm.getHostName(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } return false; } @@ -1012,7 +1010,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar _secStorageVmDao.remove(ssvm.getId()); HostVO host = _hostDao.findByTypeNameAndZoneId(ssvm.getDataCenterId(), ssvm.getHostName(), 
Host.Type.SecondaryStorageVM); if (host != null) { - s_logger.debug("Removing host entry for ssvm id=" + vmId); + logger.debug("Removing host entry for ssvm id=" + vmId); _hostDao.remove(host.getId()); //Expire the download urls in the entire zone for templates and volumes. _tmplStoreDao.expireDnldUrlsForZone(host.getDataCenterId()); @@ -1021,7 +1019,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar } return false; } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to expunge " + ssvm, e); + logger.warn("Unable to expunge " + ssvm, e); return false; } } @@ -1064,7 +1062,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar buf.append(" workers=").append(_configDao.getValue("workers")); if (_configDao.isPremium()) { - s_logger.debug("VmWare hypervisor configured, telling the ssvm to load the PremiumSecondaryStorageResource"); + logger.debug("VmWare hypervisor configured, telling the ssvm to load the PremiumSecondaryStorageResource"); buf.append(" resource=com.cloud.storage.resource.PremiumSecondaryStorageResource"); } else { buf.append(" resource=org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource"); @@ -1129,8 +1127,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar } String bootArgs = buf.toString(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Boot Args for " + profile + ": " + bootArgs); + if (logger.isDebugEnabled()) { + logger.debug("Boot Args for " + profile + ": " + bootArgs); } return true; @@ -1174,7 +1172,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar if (controlNic == null) { if (managementNic == null) { - s_logger.error("Management network doesn't exist for the secondaryStorageVm " + profile.getVirtualMachine()); + logger.error("Management network doesn't exist for the secondaryStorageVm " + profile.getVirtualMachine()); return false; } controlNic = managementNic; @@ -1195,7 
+1193,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Commands cmds, ReservationContext context) { CheckSshAnswer answer = (CheckSshAnswer)cmds.getAnswer("checkSsh"); if (!answer.getResult()) { - s_logger.warn("Unable to ssh to the VM: " + answer.getDetails()); + logger.warn("Unable to ssh to the VM: " + answer.getDetails()); return false; } @@ -1210,7 +1208,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar _secStorageVmDao.update(secVm.getId(), secVm); } } catch (Exception ex) { - s_logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); + logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); return false; } @@ -1226,7 +1224,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar try { _rulesMgr.disableStaticNat(ip.getId(), ctx.getCallingAccount(), ctx.getCallingUserId(), true); } catch (Exception ex) { - s_logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", + logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", ex); } } @@ -1276,14 +1274,14 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar long dataCenterId = pool.longValue(); if (!isZoneReady(_zoneHostInfoMap, dataCenterId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " is not ready to launch secondary storage VM yet"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " is not ready to launch secondary storage VM yet"); } return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + 
dataCenterId + " is ready to launch secondary storage VM"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " is ready to launch secondary storage VM"); } return true; } @@ -1299,7 +1297,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar List ssStores = _dataStoreMgr.getImageStoresByScope(new ZoneScope(dataCenterId)); int storeSize = (ssStores == null) ? 0 : ssStores.size(); if (storeSize > vmSize) { - s_logger.info("No secondary storage vms found in datacenter id=" + dataCenterId + ", starting a new one"); + logger.info("No secondary storage vms found in datacenter id=" + dataCenterId + ", starting a new one"); return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); } diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageDiscoverer.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageDiscoverer.java index ee5064751be..ed6066e5d13 100644 --- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageDiscoverer.java +++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/SecondaryStorageDiscoverer.java @@ -30,7 +30,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.host.HostVO; @@ -55,7 +54,6 @@ import com.cloud.utils.script.Script; */ @Local(value = Discoverer.class) public class SecondaryStorageDiscoverer extends DiscovererBase implements Discoverer { - private static final Logger s_logger = Logger.getLogger(SecondaryStorageDiscoverer.class); long _timeout = 2 * 60 * 1000; // 2 minutes String _mountParent; @@ -79,7 +77,7 @@ public class SecondaryStorageDiscoverer extends DiscovererBase implements Discov find(long dcId, Long podId, Long clusterId, URI uri, String username, String 
password, List hostTags) { if (!uri.getScheme().equalsIgnoreCase("nfs") && !uri.getScheme().equalsIgnoreCase("cifs") && !uri.getScheme().equalsIgnoreCase("file") && !uri.getScheme().equalsIgnoreCase("iso") && !uri.getScheme().equalsIgnoreCase("dummy")) { - s_logger.debug("It's not NFS or file or ISO, so not a secondary storage server: " + uri.toString()); + logger.debug("It's not NFS or file or ISO, so not a secondary storage server: " + uri.toString()); return null; } @@ -101,7 +99,7 @@ public class SecondaryStorageDiscoverer extends DiscovererBase implements Discov } String mountStr = NfsUtils.uri2Mount(uri); - Script script = new Script(true, "mount", _timeout, s_logger); + Script script = new Script(true, "mount", _timeout, logger); String mntPoint = null; File file = null; do { @@ -110,19 +108,19 @@ public class SecondaryStorageDiscoverer extends DiscovererBase implements Discov } while (file.exists()); if (!file.mkdirs()) { - s_logger.warn("Unable to make directory: " + mntPoint); + logger.warn("Unable to make directory: " + mntPoint); return null; } script.add(mountStr, mntPoint); String result = script.execute(); if (result != null && !result.contains("already mounted")) { - s_logger.warn("Unable to mount " + uri.toString() + " due to " + result); + logger.warn("Unable to mount " + uri.toString() + " due to " + result); file.delete(); return null; } - script = new Script(true, "umount", 0, s_logger); + script = new Script(true, "umount", 0, logger); script.add(mntPoint); script.execute(); @@ -140,25 +138,25 @@ public class SecondaryStorageDiscoverer extends DiscovererBase implements Discov constructor.setAccessible(true); storage = (NfsSecondaryStorageResource)constructor.newInstance(); } catch (final ClassNotFoundException e) { - s_logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to ClassNotFoundException"); + logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to 
ClassNotFoundException"); return null; } catch (final SecurityException e) { - s_logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to SecurityException"); + logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to SecurityException"); return null; } catch (final NoSuchMethodException e) { - s_logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to NoSuchMethodException"); + logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to NoSuchMethodException"); return null; } catch (final IllegalArgumentException e) { - s_logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to IllegalArgumentException"); + logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to IllegalArgumentException"); return null; } catch (final InstantiationException e) { - s_logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to InstantiationException"); + logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to InstantiationException"); return null; } catch (final IllegalAccessException e) { - s_logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to IllegalAccessException"); + logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to IllegalAccessException"); return null; } catch (final InvocationTargetException e) { - s_logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to InvocationTargetException"); + logger.error("Unable to load com.cloud.storage.resource.PremiumSecondaryStorageResource due to InvocationTargetException"); return null; } } else { @@ -183,7 +181,7 @@ public class SecondaryStorageDiscoverer extends DiscovererBase implements Discov try { 
storage.configure("Storage", params); } catch (ConfigurationException e) { - s_logger.warn("Unable to configure the storage ", e); + logger.warn("Unable to configure the storage ", e); return null; } srs.put(storage, details); @@ -214,7 +212,7 @@ public class SecondaryStorageDiscoverer extends DiscovererBase implements Discov try { storage.configure("Storage", params); } catch (ConfigurationException e) { - s_logger.warn("Unable to configure the storage ", e); + logger.warn("Unable to configure the storage ", e); return null; } srs.put(storage, details); @@ -244,7 +242,7 @@ public class SecondaryStorageDiscoverer extends DiscovererBase implements Discov try { storage.configure("Storage", params); } catch (ConfigurationException e) { - s_logger.warn("Unable to configure the storage ", e); + logger.warn("Unable to configure the storage ", e); return null; } srs.put(storage, details); diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java index 25c08871823..447c4c10110 100644 --- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java +++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java @@ -41,7 +41,6 @@ import java.util.concurrent.Executors; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; @@ -217,7 +216,6 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager } } - public static final Logger s_logger = Logger.getLogger(DownloadManagerImpl.class); private String _templateDir; private String _volumeDir; private String createTmpltScr; @@ -250,12 +248,12 @@ public class DownloadManagerImpl 
extends ManagerBase implements DownloadManager public void setDownloadStatus(String jobId, Status status) { DownloadJob dj = jobs.get(jobId); if (dj == null) { - s_logger.warn("setDownloadStatus for jobId: " + jobId + ", status=" + status + " no job found"); + logger.warn("setDownloadStatus for jobId: " + jobId + ", status=" + status + " no job found"); return; } TemplateDownloader td = dj.getTemplateDownloader(); - s_logger.info("Download Completion for jobId: " + jobId + ", status=" + status); - s_logger.info("local: " + td.getDownloadLocalPath() + ", bytes=" + td.getDownloadedBytes() + ", error=" + td.getDownloadError() + ", pct=" + + logger.info("Download Completion for jobId: " + jobId + ", status=" + status); + logger.info("local: " + td.getDownloadLocalPath() + ", bytes=" + td.getDownloadedBytes() + ", error=" + td.getDownloadError() + ", pct=" + td.getDownloadPercent()); switch (status) { @@ -268,7 +266,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager case UNKNOWN: return; case IN_PROGRESS: - s_logger.info("Resuming jobId: " + jobId + ", status=" + status); + logger.info("Resuming jobId: " + jobId + ", status=" + status); td.setResume(true); threadPool.execute(td); break; @@ -282,7 +280,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager td.setDownloadError("Download success, starting install "); String result = postDownload(jobId); if (result != null) { - s_logger.error("Failed post download script: " + result); + logger.error("Failed post download script: " + result); td.setStatus(Status.UNRECOVERABLE_ERROR); td.setDownloadError("Failed post download script: " + result); } else { @@ -357,7 +355,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager File originalTemplate = new File(td.getDownloadLocalPath()); String checkSum = computeCheckSum(originalTemplate); if (checkSum == null) { - s_logger.warn("Something wrong happened when try to calculate the 
checksum of downloaded template!"); + logger.warn("Something wrong happened when try to calculate the checksum of downloaded template!"); } dnld.setCheckSum(checkSum); @@ -366,7 +364,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager long timeout = (long)imgSizeGigs * installTimeoutPerGig; Script scr = null; String script = resourceType == ResourceType.TEMPLATE ? createTmpltScr : createVolScr; - scr = new Script(script, timeout, s_logger); + scr = new Script(script, timeout, logger); scr.add("-s", Integer.toString(imgSizeGigs)); scr.add("-S", Long.toString(td.getMaxTemplateSizeInBytes())); if (dnld.getDescription() != null && dnld.getDescription().length() > 1) { @@ -423,7 +421,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager try { loc.create(dnld.getId(), true, dnld.getTmpltName()); } catch (IOException e) { - s_logger.warn("Something is wrong with template location " + resourcePath, e); + logger.warn("Something is wrong with template location " + resourcePath, e); loc.purge(); return "Unable to download due to " + e.getMessage(); } @@ -436,7 +434,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager try { info = processor.process(resourcePath, null, templateName); } catch (InternalErrorException e) { - s_logger.error("Template process exception ", e); + logger.error("Template process exception ", e); return e.toString(); } if (info != null) { @@ -448,7 +446,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager } if (!loc.save()) { - s_logger.warn("Cleaning up because we're unable to save the formats"); + logger.warn("Cleaning up because we're unable to save the formats"); loc.purge(); } @@ -507,7 +505,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager try { if (!_storage.mkdirs(tmpDir)) { - s_logger.warn("Unable to create " + tmpDir); + logger.warn("Unable to create " + tmpDir); return "Unable to 
create " + tmpDir; } // TO DO - define constant for volume properties. @@ -519,7 +517,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager } if (!file.createNewFile()) { - s_logger.warn("Unable to create new file: " + file.getAbsolutePath()); + logger.warn("Unable to create new file: " + file.getAbsolutePath()); return "Unable to create new file: " + file.getAbsolutePath(); } @@ -559,7 +557,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager return jobId; } catch (IOException e) { - s_logger.warn("Unable to download to " + tmpDir, e); + logger.warn("Unable to download to " + tmpDir, e); return null; } } @@ -753,24 +751,24 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager private List listVolumes(String rootdir) { List result = new ArrayList(); - Script script = new Script(listVolScr, s_logger); + Script script = new Script(listVolScr, logger); script.add("-r", rootdir); ZfsPathParser zpp = new ZfsPathParser(rootdir); script.execute(zpp); result.addAll(zpp.getPaths()); - s_logger.info("found " + zpp.getPaths().size() + " volumes" + zpp.getPaths()); + logger.info("found " + zpp.getPaths().size() + " volumes" + zpp.getPaths()); return result; } private List listTemplates(String rootdir) { List result = new ArrayList(); - Script script = new Script(listTmpltScr, s_logger); + Script script = new Script(listTmpltScr, logger); script.add("-r", rootdir); ZfsPathParser zpp = new ZfsPathParser(rootdir); script.execute(zpp); result.addAll(zpp.getPaths()); - s_logger.info("found " + zpp.getPaths().size() + " templates" + zpp.getPaths()); + logger.info("found " + zpp.getPaths().size() + " templates" + zpp.getPaths()); return result; } @@ -789,13 +787,13 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager TemplateLocation loc = new TemplateLocation(_storage, path); try { if (!loc.load()) { - s_logger.warn("Post download installation was not completed 
for " + path); + logger.warn("Post download installation was not completed for " + path); // loc.purge(); _storage.cleanup(path, templateDir); continue; } } catch (IOException e) { - s_logger.warn("Unable to load template location " + path, e); + logger.warn("Unable to load template location " + path, e); continue; } @@ -810,12 +808,12 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager loc.updateVirtualSize(vSize); loc.save(); } catch (Exception e) { - s_logger.error("Unable to get the virtual size of the template: " + tInfo.getInstallPath() + " due to " + e.getMessage()); + logger.error("Unable to get the virtual size of the template: " + tInfo.getInstallPath() + " due to " + e.getMessage()); } } result.put(tInfo.getTemplateName(), tInfo); - s_logger.debug("Added template name: " + tInfo.getTemplateName() + ", path: " + tmplt); + logger.debug("Added template name: " + tInfo.getTemplateName() + ", path: " + tmplt); } /* for (String tmplt : isoTmplts) { @@ -824,7 +822,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager String tmpltName = tmp[tmp.length - 2]; tmplt = tmplt.substring(tmplt.lastIndexOf("iso/")); TemplateInfo tInfo = new TemplateInfo(tmpltName, tmplt, false); - s_logger.debug("Added iso template name: " + tmpltName + ", path: " + tmplt); + logger.debug("Added iso template name: " + tmpltName + ", path: " + tmplt); result.put(tmpltName, tInfo); } */ @@ -846,13 +844,13 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager TemplateLocation loc = new TemplateLocation(_storage, path); try { if (!loc.load()) { - s_logger.warn("Post download installation was not completed for " + path); + logger.warn("Post download installation was not completed for " + path); // loc.purge(); _storage.cleanup(path, volumeDir); continue; } } catch (IOException e) { - s_logger.warn("Unable to load volume location " + path, e); + logger.warn("Unable to load volume location " + path, e); 
continue; } @@ -867,12 +865,12 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager loc.updateVirtualSize(vSize); loc.save(); } catch (Exception e) { - s_logger.error("Unable to get the virtual size of the volume: " + vInfo.getInstallPath() + " due to " + e.getMessage()); + logger.error("Unable to get the virtual size of the volume: " + vInfo.getInstallPath() + " due to " + e.getMessage()); } } result.put(vInfo.getId(), vInfo); - s_logger.debug("Added volume name: " + vInfo.getTemplateName() + ", path: " + vol); + logger.debug("Added volume name: " + vInfo.getTemplateName() + ", path: " + vol); } return result; } @@ -936,7 +934,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager String inSystemVM = (String)params.get("secondary.storage.vm"); if (inSystemVM != null && "true".equalsIgnoreCase(inSystemVM)) { - s_logger.info("DownloadManager: starting additional services since we are inside system vm"); + logger.info("DownloadManager: starting additional services since we are inside system vm"); startAdditionalServices(); blockOutgoingOnPrivate(); } @@ -956,25 +954,25 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager if (listTmpltScr == null) { throw new ConfigurationException("Unable to find the listvmtmplt.sh"); } - s_logger.info("listvmtmplt.sh found in " + listTmpltScr); + logger.info("listvmtmplt.sh found in " + listTmpltScr); createTmpltScr = Script.findScript(scriptsDir, "createtmplt.sh"); if (createTmpltScr == null) { throw new ConfigurationException("Unable to find createtmplt.sh"); } - s_logger.info("createtmplt.sh found in " + createTmpltScr); + logger.info("createtmplt.sh found in " + createTmpltScr); listVolScr = Script.findScript(scriptsDir, "listvolume.sh"); if (listVolScr == null) { throw new ConfigurationException("Unable to find the listvolume.sh"); } - s_logger.info("listvolume.sh found in " + listVolScr); + logger.info("listvolume.sh found in " + 
listVolScr); createVolScr = Script.findScript(scriptsDir, "createvolume.sh"); if (createVolScr == null) { throw new ConfigurationException("Unable to find createvolume.sh"); } - s_logger.info("createvolume.sh found in " + createVolScr); + logger.info("createvolume.sh found in " + createVolScr); _processors = new HashMap(); @@ -1018,7 +1016,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager } private void blockOutgoingOnPrivate() { - Script command = new Script("/bin/bash", s_logger); + Script command = new Script("/bin/bash", logger); String intf = "eth1"; command.add("-c"); command.add("iptables -A OUTPUT -o " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "80" + " -j REJECT;" + "iptables -A OUTPUT -o " + intf + @@ -1026,7 +1024,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager String result = command.execute(); if (result != null) { - s_logger.warn("Error in blocking outgoing to port 80/443 err=" + result); + logger.warn("Error in blocking outgoing to port 80/443 err=" + result); return; } } @@ -1048,41 +1046,41 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager private void startAdditionalServices() { - Script command = new Script("/bin/bash", s_logger); + Script command = new Script("/bin/bash", logger); command.add("-c"); command.add("if [ -d /etc/apache2 ] ; then service apache2 stop; else service httpd stop; fi "); String result = command.execute(); if (result != null) { - s_logger.warn("Error in stopping httpd service err=" + result); + logger.warn("Error in stopping httpd service err=" + result); } String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT); String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF; - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); command.add("-c"); command.add("iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j 
ACCEPT;" + "iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j ACCEPT;"); result = command.execute(); if (result != null) { - s_logger.warn("Error in opening up httpd port err=" + result); + logger.warn("Error in opening up httpd port err=" + result); return; } - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); command.add("-c"); command.add("if [ -d /etc/apache2 ] ; then service apache2 start; else service httpd start; fi "); result = command.execute(); if (result != null) { - s_logger.warn("Error in starting httpd service err=" + result); + logger.warn("Error in starting httpd service err=" + result); return; } - command = new Script("mkdir", s_logger); + command = new Script("mkdir", logger); command.add("-p"); command.add("/var/www/html/copy/template"); result = command.execute(); if (result != null) { - s_logger.warn("Error in creating directory =" + result); + logger.warn("Error in creating directory =" + result); return; } } diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/UploadManagerImpl.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/UploadManagerImpl.java index a8ed9a84d9b..ebbff09dbbf 100644 --- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/UploadManagerImpl.java +++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/template/UploadManagerImpl.java @@ -30,7 +30,6 @@ import java.util.concurrent.Executors; import javax.naming.ConfigurationException; import com.cloud.agent.api.Answer; -import org.apache.log4j.Logger; import org.apache.cloudstack.storage.resource.SecondaryStorageResource; @@ -93,7 +92,6 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { } - public static final Logger s_logger = Logger.getLogger(UploadManagerImpl.class); private ExecutorService threadPool; private final Map jobs = new 
ConcurrentHashMap(); private String parentDir; @@ -109,13 +107,13 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { String jobId = uuid.toString(); String completePath = parentDir + File.separator + installPathPrefix; - s_logger.debug("Starting upload from " + completePath); + logger.debug("Starting upload from " + completePath); URI uri; try { uri = new URI(url); } catch (URISyntaxException e) { - s_logger.error("URI is incorrect: " + url); + logger.error("URI is incorrect: " + url); throw new CloudRuntimeException("URI is incorrect: " + url); } TemplateUploader tu; @@ -123,11 +121,11 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { if (uri.getScheme().equalsIgnoreCase("ftp")) { tu = new FtpTemplateUploader(completePath, url, new Completion(jobId), templateSizeInBytes); } else { - s_logger.error("Scheme is not supported " + url); + logger.error("Scheme is not supported " + url); throw new CloudRuntimeException("Scheme is not supported " + url); } } else { - s_logger.error("Unable to download from URL: " + url); + logger.error("Unable to download from URL: " + url); throw new CloudRuntimeException("Unable to download from URL: " + url); } UploadJob uj = new UploadJob(tu, jobId, id, name, format, hvm, accountId, descr, cksum, installPathPrefix); @@ -240,7 +238,7 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { @Override public UploadAnswer handleUploadCommand(SecondaryStorageResource resource, UploadCommand cmd) { - s_logger.warn("Handling the upload " + cmd.getInstallPath() + " " + cmd.getId()); + logger.warn("Handling the upload " + cmd.getInstallPath() + " " + cmd.getId()); if (cmd instanceof UploadProgressCommand) { return handleUploadProgressCmd((UploadProgressCommand)cmd); } @@ -261,40 +259,40 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { boolean isApacheUp = checkAndStartApache(); if (!isApacheUp) { String errorString = "Error in 
starting Apache server "; - s_logger.error(errorString); + logger.error(errorString); return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); } // Create the directory structure so that its visible under apache server root String extractDir = "/var/www/html/userdata/"; - Script command = new Script("mkdir", s_logger); + Script command = new Script("mkdir", logger); command.add("-p"); command.add(extractDir); String result = command.execute(); if (result != null) { String errorString = "Error in creating directory =" + result; - s_logger.error(errorString); + logger.error(errorString); return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); } // Create a random file under the directory for security reasons. String uuid = cmd.getExtractLinkUUID(); - command = new Script("touch", s_logger); + command = new Script("touch", logger); command.add(extractDir + uuid); result = command.execute(); if (result != null) { String errorString = "Error in creating file " + uuid + " ,error: " + result; - s_logger.warn(errorString); + logger.warn(errorString); return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); } // Create a symbolic link from the actual directory to the template location. 
The entity would be directly visible under /var/www/html/userdata/cmd.getInstallPath(); - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); command.add("-c"); command.add("ln -sf /mnt/SecStorage/" + cmd.getParent() + File.separator + cmd.getInstallPath() + " " + extractDir + uuid); result = command.execute(); if (result != null) { String errorString = "Error in linking err=" + result; - s_logger.error(errorString); + logger.error(errorString); return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); } @@ -306,9 +304,9 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { public Answer handleDeleteEntityDownloadURLCommand(DeleteEntityDownloadURLCommand cmd) { //Delete the soft link. Example path = volumes/8/74eeb2c6-8ab1-4357-841f-2e9d06d1f360.vhd - s_logger.warn("handleDeleteEntityDownloadURLCommand Path:" + cmd.getPath() + " Type:" + cmd.getType().toString()); + logger.warn("handleDeleteEntityDownloadURLCommand Path:" + cmd.getPath() + " Type:" + cmd.getType().toString()); String path = cmd.getPath(); - Script command = new Script("/bin/bash", s_logger); + Script command = new Script("/bin/bash", logger); command.add("-c"); //We just need to remove the UUID.vhd @@ -318,19 +316,19 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { if (result != null) { // FIXME - Ideally should bail out if you cant delete symlink. Not doing it right now. // This is because the ssvm might already be destroyed and the symlinks do not exist. - s_logger.warn("Error in deleting symlink :" + result); + logger.warn("Error in deleting symlink :" + result); } // If its a volume also delete the Hard link since it was created only for the purpose of download. 
if (cmd.getType() == Upload.Type.VOLUME) { - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); command.add("-c"); command.add("rm -rf /mnt/SecStorage/" + cmd.getParentPath() + File.separator + path); - s_logger.warn(" " + parentDir + File.separator + path); + logger.warn(" " + parentDir + File.separator + path); result = command.execute(); if (result != null) { String errorString = "Error in deleting volume " + path + " : " + result; - s_logger.warn(errorString); + logger.warn(errorString); return new Answer(cmd, false, errorString); } } @@ -380,7 +378,7 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { String inSystemVM = (String)params.get("secondary.storage.vm"); if (inSystemVM != null && "true".equalsIgnoreCase(inSystemVM)) { - s_logger.info("UploadManager: starting additional services since we are inside system vm"); + logger.info("UploadManager: starting additional services since we are inside system vm"); startAdditionalServices(); //blockOutgoingOnPrivate(); } @@ -401,29 +399,29 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { private void startAdditionalServices() { - Script command = new Script("rm", s_logger); + Script command = new Script("rm", logger); command.add("-rf"); command.add(extractMountPoint); String result = command.execute(); if (result != null) { - s_logger.warn("Error in creating file " + extractMountPoint + " ,error: " + result); + logger.warn("Error in creating file " + extractMountPoint + " ,error: " + result); return; } - command = new Script("touch", s_logger); + command = new Script("touch", logger); command.add(extractMountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Error in creating file " + extractMountPoint + " ,error: " + result); + logger.warn("Error in creating file " + extractMountPoint + " ,error: " + result); return; } - command = new Script("/bin/bash", s_logger); + command = new 
Script("/bin/bash", logger); command.add("-c"); command.add("ln -sf " + parentDir + " " + extractMountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Error in linking err=" + result); + logger.warn("Error in linking err=" + result); return; } @@ -440,12 +438,12 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { public void setUploadStatus(String jobId, Status status) { UploadJob uj = jobs.get(jobId); if (uj == null) { - s_logger.warn("setUploadStatus for jobId: " + jobId + ", status=" + status + " no job found"); + logger.warn("setUploadStatus for jobId: " + jobId + ", status=" + status + " no job found"); return; } TemplateUploader tu = uj.getTemplateUploader(); - s_logger.warn("Upload Completion for jobId: " + jobId + ", status=" + status); - s_logger.warn("UploadedBytes=" + tu.getUploadedBytes() + ", error=" + tu.getUploadError() + ", pct=" + tu.getUploadPercent()); + logger.warn("Upload Completion for jobId: " + jobId + ", status=" + status); + logger.warn("UploadedBytes=" + tu.getUploadedBytes() + ", error=" + tu.getUploadError() + ", pct=" + tu.getUploadPercent()); switch (status) { case ABORTED: @@ -459,7 +457,7 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { case UNKNOWN: return; case IN_PROGRESS: - s_logger.info("Resuming jobId: " + jobId + ", status=" + status); + logger.info("Resuming jobId: " + jobId + ", status=" + status); tu.setResume(true); threadPool.execute(tu); break; @@ -470,11 +468,11 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { tu.setUploadError("Upload success, starting install "); String result = postUpload(jobId); if (result != null) { - s_logger.error("Failed post upload script: " + result); + logger.error("Failed post upload script: " + result); tu.setStatus(Status.UNRECOVERABLE_ERROR); tu.setUploadError("Failed post upload script: " + result); } else { - s_logger.warn("Upload completed successfully at " + new 
SimpleDateFormat().format(new Date())); + logger.warn("Upload completed successfully at " + new SimpleDateFormat().format(new Date())); tu.setStatus(Status.POST_UPLOAD_FINISHED); tu.setUploadError("Upload completed successfully at " + new SimpleDateFormat().format(new Date())); } @@ -503,7 +501,7 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { private boolean checkAndStartApache() { //Check whether the Apache server is running - Script command = new Script("/bin/bash", s_logger); + Script command = new Script("/bin/bash", logger); command.add("-c"); command.add("if [ -d /etc/apache2 ] ; then service apache2 status | grep pid; else service httpd status | grep pid; fi "); String result = command.execute(); @@ -511,11 +509,11 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { //Apache Server is not running. Try to start it. if (result != null) { - /*s_logger.warn("Apache server not running, trying to start it"); + /*logger.warn("Apache server not running, trying to start it"); String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT); String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF; - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); command.add("-c"); command.add("iptables -D INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j DROP;" + "iptables -D INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j HTTP;" + @@ -531,16 +529,16 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager { result = command.execute(); if (result != null) { - s_logger.warn("Error in opening up httpd port err=" + result ); + logger.warn("Error in opening up httpd port err=" + result ); return false; }*/ - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); command.add("-c"); command.add("if [ -d /etc/apache2 ] ; then service apache2 start; else 
service httpd start; fi "); result = command.execute(); if (result != null) { - s_logger.warn("Error in starting httpd service err=" + result); + logger.warn("Error in starting httpd service err=" + result); return false; } } diff --git a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java index 3c55f57ede0..836b34ecf35 100644 --- a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java +++ b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java @@ -33,7 +33,6 @@ import javax.mail.internet.InternetAddress; import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.alert.AlertManager; @@ -48,8 +47,6 @@ import com.sun.mail.smtp.SMTPTransport; @Component @Local(value = {AlertManager.class}) public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { - private static final Logger s_logger = Logger.getLogger(UsageAlertManagerImpl.class.getName()); - private static final Logger s_alertsLogger = Logger.getLogger("org.apache.cloudstack.alerts"); private EmailAlert _emailAlert; @Inject @@ -92,7 +89,7 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { _emailAlert.clearAlert(alertType.getType(), dataCenterId, podId); } } catch (Exception ex) { - s_logger.error("Problem clearing email alert", ex); + logger.error("Problem clearing email alert", ex); } } @@ -104,11 +101,11 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { if (_emailAlert != null) { _emailAlert.sendAlert(alertType, dataCenterId, podId, subject, body); } else { - s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + podId + " // clusterId:: " + null + + logger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + podId + " // clusterId:: " 
+ null + " // message:: " + subject + " // body:: " + body); } } catch (Exception ex) { - s_logger.error("Problem sending email alert", ex); + logger.error("Problem sending email alert", ex); } } @@ -130,7 +127,7 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { try { _recipientList[i] = new InternetAddress(recipientList[i], recipientList[i]); } catch (Exception ex) { - s_logger.error("Exception creating address for: " + recipientList[i], ex); + logger.error("Exception creating address for: " + recipientList[i], ex); } } } @@ -177,7 +174,7 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { // TODO: make sure this handles SSL transport (useAuth is true) and regular protected void sendAlert(AlertType alertType, long dataCenterId, Long podId, String subject, String content) throws MessagingException, UnsupportedEncodingException { - s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + logger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + podId + " // clusterId:: " + null + " // message:: " + subject); AlertVO alert = null; if ((alertType != AlertManager.AlertType.ALERT_TYPE_HOST) && @@ -202,8 +199,8 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { newAlert.setName(alertType.getName()); _alertDao.persist(newAlert); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Have already sent: " + alert.getSentCount() + " emails for alert type '" + alertType + "' -- skipping send email"); + if (logger.isDebugEnabled()) { + logger.debug("Have already sent: " + alert.getSentCount() + " emails for alert type '" + alertType + "' -- skipping send email"); } return; } @@ -256,7 +253,7 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { sendAlert(alertType, dataCenterId, podId, msg, msg); return true; } catch (Exception ex) { - 
s_logger.warn("Failed to generate an alert of type=" + alertType + "; msg=" + msg); + logger.warn("Failed to generate an alert of type=" + alertType + "; msg=" + msg); return false; } } diff --git a/usage/src/com/cloud/usage/UsageManagerImpl.java b/usage/src/com/cloud/usage/UsageManagerImpl.java index c1e26b30c0c..c358fabf84f 100644 --- a/usage/src/com/cloud/usage/UsageManagerImpl.java +++ b/usage/src/com/cloud/usage/UsageManagerImpl.java @@ -35,7 +35,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.utils.usage.UsageUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -92,7 +91,6 @@ import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = {UsageManager.class}) public class UsageManagerImpl extends ManagerBase implements UsageManager, Runnable { - public static final Logger s_logger = Logger.getLogger(UsageManagerImpl.class.getName()); protected static final String DAILY = "DAILY"; protected static final String WEEKLY = "WEEKLY"; @@ -176,16 +174,16 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna public boolean configure(String name, Map params) throws ConfigurationException { final String run = "usage.vmops.pid"; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking to see if " + run + " exists."); + if (logger.isDebugEnabled()) { + logger.debug("Checking to see if " + run + " exists."); } final Class c = UsageServer.class; _version = c.getPackage().getImplementationVersion(); if (_version == null) _version="unknown"; - if (s_logger.isInfoEnabled()) { - s_logger.info("Implementation Version is " + _version); + if (logger.isInfoEnabled()) { + logger.info("Implementation Version is " + _version); } Map configs = _configDao.getConfiguration(params); @@ -206,18 +204,18 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna 
if (aggreagationTimeZone != null && !aggreagationTimeZone.isEmpty()) { _usageTimezone = TimeZone.getTimeZone(aggreagationTimeZone); } - s_logger.debug("Usage stats aggregation time zone: " + aggreagationTimeZone); + logger.debug("Usage stats aggregation time zone: " + aggreagationTimeZone); try { if ((execTime == null) || (aggregationRange == null)) { - s_logger.error("missing configuration values for usage job, usage.stats.job.exec.time = " + execTime + ", usage.stats.job.aggregation.range = " + + logger.error("missing configuration values for usage job, usage.stats.job.exec.time = " + execTime + ", usage.stats.job.aggregation.range = " + aggregationRange); throw new ConfigurationException("Missing configuration values for usage job, usage.stats.job.exec.time = " + execTime + ", usage.stats.job.aggregation.range = " + aggregationRange); } String[] execTimeSegments = execTime.split(":"); if (execTimeSegments.length != 2) { - s_logger.error("Unable to parse usage.stats.job.exec.time"); + logger.error("Unable to parse usage.stats.job.exec.time"); throw new ConfigurationException("Unable to parse usage.stats.job.exec.time '" + execTime + "'"); } int hourOfDay = Integer.parseInt(execTimeSegments[0]); @@ -238,13 +236,13 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna _jobExecTime.roll(Calendar.DAY_OF_YEAR, true); } - s_logger.debug("Execution Time: " + execDate.toString()); + logger.debug("Execution Time: " + execDate.toString()); Date currentDate = new Date(System.currentTimeMillis()); - s_logger.debug("Current Time: " + currentDate.toString()); + logger.debug("Current Time: " + currentDate.toString()); _aggregationDuration = Integer.parseInt(aggregationRange); if (_aggregationDuration < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); + logger.warn("Usage stats job aggregation range is to small, using the 
minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); _aggregationDuration = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; } _hostname = InetAddress.getLocalHost().getHostName() + "/" + InetAddress.getLocalHost().getHostAddress(); @@ -252,7 +250,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna throw new ConfigurationException("Unable to parse usage.stats.job.exec.time '" + execTime + "' or usage.stats.job.aggregation.range '" + aggregationRange + "', please check configuration values"); } catch (Exception e) { - s_logger.error("Unhandled exception configuring UsageManger", e); + logger.error("Unhandled exception configuring UsageManger", e); throw new ConfigurationException("Unhandled exception configuring UsageManager " + e.toString()); } _pid = Integer.parseInt(System.getProperty("pid")); @@ -261,8 +259,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna @Override public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Starting Usage Manager"); + if (logger.isInfoEnabled()) { + logger.info("Starting Usage Manager"); } // use the configured exec time and aggregation duration for scheduling the job @@ -289,8 +287,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna _heartbeatLock.unlock(); } } else { - if (s_logger.isTraceEnabled()) - s_logger.trace("Heartbeat lock is in use by others, returning true as someone else will take over the job if required"); + if (logger.isTraceEnabled()) + logger.trace("Heartbeat lock is in use by others, returning true as someone else will take over the job if required"); } } finally { usageTxn.close(); @@ -320,8 +318,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } protected void runInContextInternal() { - if (s_logger.isInfoEnabled()) { - s_logger.info("starting usage job..."); + if (logger.isInfoEnabled()) { + logger.info("starting usage job..."); } // how about we 
update the job exec time when the job starts??? @@ -374,19 +372,19 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna parse(job, startDate, endDate); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not owner of usage job, skipping..."); + if (logger.isDebugEnabled()) { + logger.debug("Not owner of usage job, skipping..."); } } - if (s_logger.isInfoEnabled()) { - s_logger.info("usage job complete"); + if (logger.isInfoEnabled()) { + logger.info("usage job complete"); } } @Override public void scheduleParse() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduling Usage job..."); + if (logger.isDebugEnabled()) { + logger.debug("Scheduling Usage job..."); } _executor.schedule(this, 0, TimeUnit.MILLISECONDS); } @@ -408,8 +406,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } if (startDateMillis >= endDateMillis) { - if (s_logger.isInfoEnabled()) { - s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")"); + if (logger.isInfoEnabled()) { + logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")"); } TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); @@ -431,8 +429,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } Date startDate = new Date(startDateMillis); Date endDate = new Date(endDateMillis); - if (s_logger.isInfoEnabled()) { - s_logger.info("Parsing usage records between " + startDate + " and " + endDate); + if (logger.isInfoEnabled()) { + logger.info("Parsing usage records between " + startDate + " and " + endDate); } List accounts = null; @@ -652,8 +650,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } _usageNetworkDao.saveUsageNetworks(usageNetworks); - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts"); + if (logger.isDebugEnabled()) { + logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts"); } // get vm disk stats in order to compute vm disk usage @@ -701,8 +699,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } _usageVmDiskDao.saveUsageVmDisks(usageVmDisks); - if (s_logger.isDebugEnabled()) { - s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts"); + if (logger.isDebugEnabled()) { + logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts"); } // commit the helper records, then start a new transaction @@ -741,8 +739,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna offset = new Long(offset.longValue() + limit.longValue()); } while ((accounts != null) && !accounts.isEmpty()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts"); + if (logger.isDebugEnabled()) { + logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts"); } numAcctsProcessed = 0; @@ -762,12 +760,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna //mark public templates owned by deleted accounts as deleted List storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE); if (storageVOs.size() > 1) { - s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() + + logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() + "; marking them all as deleted..."); } for (UsageStorageVO storageVO : storageVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); + if 
(logger.isDebugEnabled()) { + logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); } storageVO.setDeleted(account.getRemoved()); _usageStorageDao.update(storageVO); @@ -785,8 +783,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna currentEndDate = aggregateCal.getTime(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts"); + if (logger.isDebugEnabled()) { + logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts"); } // FIXME: we don't break the above loop if something fails to parse, so it gets reset every account, @@ -797,7 +795,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna success = true; } } catch (Exception ex) { - s_logger.error("Exception in usage manager", ex); + logger.error("Exception in usage manager", ex); usageTxn.rollback(); } finally { // everything seemed to work...set endDate as the last success date @@ -822,7 +820,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } } catch (Exception e) { - s_logger.error("Usage Manager error", e); + logger.error("Usage Manager error", e); } } @@ -830,84 +828,84 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna boolean parsed = false; parsed = VMInstanceUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("vm usage instances successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("vm usage instances successfully parsed? 
" + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = NetworkUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("network usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("network usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = VmDiskUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("vm disk usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("vm disk usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = VolumeUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("volume usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("volume usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = StorageUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("storage usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("storage usage successfully parsed? 
" + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = SecurityGroupUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("Security Group usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("Security Group usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = LoadBalancerUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("load balancer usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("load balancer usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = PortForwardingUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("port forwarding usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("port forwarding usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = NetworkOfferingUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("network offering usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("network offering usage successfully parsed? 
" + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = IPAddressUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("IPAddress usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("IPAddress usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = VPNUserUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("VPN user usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("VPN user usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } parsed = VMSnapshotUsageParser.parse(account, currentStartDate, currentEndDate); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!parsed) { - s_logger.debug("VM Snapshot usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + logger.debug("VM Snapshot usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); } } return parsed; @@ -1036,7 +1034,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna List usageInstances = _usageInstanceDao.search(sc, null); if (usageInstances != null) { if (usageInstances.size() > 0) { - s_logger.error("found entries for a vm running with id: " + vmId + ", which are not stopped. Ending them all..."); + logger.error("found entries for a vm running with id: " + vmId + ", which are not stopped. 
Ending them all..."); for (UsageVMInstanceVO usageInstance : usageInstances) { usageInstance.setEndDate(event.getCreateDate()); _usageInstanceDao.update(usageInstance); @@ -1050,7 +1048,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("usageType", SearchCriteria.Op.EQ, UsageTypes.ALLOCATED_VM); usageInstances = _usageInstanceDao.search(sc, null); if (usageInstances == null || (usageInstances.size() == 0)) { - s_logger.error("Cannot find allocated vm entry for a vm running with id: " + vmId); + logger.error("Cannot find allocated vm entry for a vm running with id: " + vmId); } else if (usageInstances.size() == 1) { UsageVMInstanceVO usageInstance = usageInstances.get(0); if (usageInstance.getSerivceOfferingId() != soId) { @@ -1074,7 +1072,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna null); populateDynamicComputeOfferingDetailsAndPersist(usageInstanceNew, event.getId()); } catch (Exception ex) { - s_logger.error("Error saving usage instance for vm: " + vmId, ex); + logger.error("Error saving usage instance for vm: " + vmId, ex); } } else if (EventTypes.EVENT_VM_STOP.equals(event.getType())) { // find the latest usage_VM_instance row, update the stop date (should be null) to the event date @@ -1086,7 +1084,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna List usageInstances = _usageInstanceDao.search(sc, null); if (usageInstances != null) { if (usageInstances.size() > 1) { - s_logger.warn("found multiple entries for a vm running with id: " + vmId + ", ending them all..."); + logger.warn("found multiple entries for a vm running with id: " + vmId + ", ending them all..."); } for (UsageVMInstanceVO usageInstance : usageInstances) { usageInstance.setEndDate(event.getCreateDate()); @@ -1105,7 +1103,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna soId, templateId, hypervisorType, event.getCreateDate(), null); 
populateDynamicComputeOfferingDetailsAndPersist(usageInstanceNew, event.getId()); } catch (Exception ex) { - s_logger.error("Error saving usage instance for vm: " + vmId, ex); + logger.error("Error saving usage instance for vm: " + vmId, ex); } } else if (EventTypes.EVENT_VM_DESTROY.equals(event.getType())) { SearchCriteria sc = _usageInstanceDao.createSearchCriteria(); @@ -1115,7 +1113,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna List usageInstances = _usageInstanceDao.search(sc, null); if (usageInstances != null) { if (usageInstances.size() > 1) { - s_logger.warn("found multiple entries for a vm allocated with id: " + vmId + ", detroying them all..."); + logger.warn("found multiple entries for a vm allocated with id: " + vmId + ", destroying them all..."); } for (UsageVMInstanceVO usageInstance : usageInstances) { usageInstance.setEndDate(event.getCreateDate()); @@ -1130,7 +1128,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna List usageInstances = _usageInstanceDao.search(sc, null); if (usageInstances != null) { if (usageInstances.size() > 1) { - s_logger.warn("found multiple entries for a vm allocated with id: " + vmId + ", updating end_date for all of them..."); + logger.warn("found multiple entries for a vm allocated with id: " + vmId + ", updating end_date for all of them..."); } for (UsageVMInstanceVO usageInstance : usageInstances) { usageInstance.setEndDate(event.getCreateDate()); @@ -1153,7 +1151,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna List usageInstances = _usageInstanceDao.search(sc, null); if (usageInstances != null) { if (usageInstances.size() > 1) { - s_logger.warn("found multiple entries for a vm running with id: " + vmId + ", ending them all..."); + logger.warn("found multiple entries for a vm running with id: " + vmId + ", ending them all..."); } for (UsageVMInstanceVO usageInstance : usageInstances) {
usageInstance.setEndDate(event.getCreateDate()); @@ -1167,7 +1165,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("usageType", SearchCriteria.Op.EQ, UsageTypes.ALLOCATED_VM); usageInstances = _usageInstanceDao.search(sc, null); if (usageInstances == null || (usageInstances.size() == 0)) { - s_logger.error("Cannot find allocated vm entry for a vm running with id: " + vmId); + logger.error("Cannot find allocated vm entry for a vm running with id: " + vmId); } else if (usageInstances.size() == 1) { UsageVMInstanceVO usageInstance = usageInstances.get(0); if (usageInstance.getSerivceOfferingId() != soId) { @@ -1222,8 +1220,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna long currentAccountedBytesSent = 0L; long currentAccountedBytesReceived = 0L; if (usageNetworkStats != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("getting current accounted bytes for... accountId: " + usageNetworkStats.getAccountId() + " in zone: " + userStat.getDataCenterId() + + if (logger.isDebugEnabled()) { + logger.debug("getting current accounted bytes for... 
accountId: " + usageNetworkStats.getAccountId() + " in zone: " + userStat.getDataCenterId() + "; abr: " + usageNetworkStats.getAggBytesReceived() + "; abs: " + usageNetworkStats.getAggBytesSent()); } currentAccountedBytesSent = usageNetworkStats.getAggBytesSent(); @@ -1233,12 +1231,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna long bytesReceived = userStat.getAggBytesReceived() - currentAccountedBytesReceived; if (bytesSent < 0) { - s_logger.warn("Calculated negative value for bytes sent: " + bytesSent + ", user stats say: " + userStat.getAggBytesSent() + + logger.warn("Calculated negative value for bytes sent: " + bytesSent + ", user stats say: " + userStat.getAggBytesSent() + ", previous network usage was: " + currentAccountedBytesSent); bytesSent = 0; } if (bytesReceived < 0) { - s_logger.warn("Calculated negative value for bytes received: " + bytesReceived + ", user stats say: " + userStat.getAggBytesReceived() + + logger.warn("Calculated negative value for bytes received: " + bytesReceived + ", user stats say: " + userStat.getAggBytesReceived() + ", previous network usage was: " + currentAccountedBytesReceived); bytesReceived = 0; } @@ -1252,8 +1250,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna UsageNetworkVO usageNetworkVO = new UsageNetworkVO(userStat.getAccountId(), userStat.getDataCenterId(), hostId, userStat.getDeviceType(), userStat.getNetworkId(), bytesSent, bytesReceived, userStat.getAggBytesReceived(), userStat.getAggBytesSent(), timestamp); - if (s_logger.isDebugEnabled()) { - s_logger.debug("creating networkHelperEntry... accountId: " + userStat.getAccountId() + " in zone: " + userStat.getDataCenterId() + "; abr: " + + if (logger.isDebugEnabled()) { + logger.debug("creating networkHelperEntry... 
accountId: " + userStat.getAccountId() + " in zone: " + userStat.getDataCenterId() + "; abr: " + userStat.getAggBytesReceived() + "; abs: " + userStat.getAggBytesSent() + "; curABS: " + currentAccountedBytesSent + "; curABR: " + currentAccountedBytesReceived + "; ubs: " + bytesSent + "; ubr: " + bytesReceived); } @@ -1266,8 +1264,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna long currentAccountedBytesRead = 0L; long currentAccountedBytesWrite = 0L; if (usageVmDiskStat != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("getting current accounted bytes for... accountId: " + usageVmDiskStat.getAccountId() + " in zone: " + vmDiskStat.getDataCenterId() + + if (logger.isDebugEnabled()) { + logger.debug("getting current accounted bytes for... accountId: " + usageVmDiskStat.getAccountId() + " in zone: " + vmDiskStat.getDataCenterId() + "; aiw: " + vmDiskStat.getAggIOWrite() + "; air: " + usageVmDiskStat.getAggIORead() + "; abw: " + vmDiskStat.getAggBytesWrite() + "; abr: " + usageVmDiskStat.getAggBytesRead()); } @@ -1282,22 +1280,22 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna long bytesWrite = vmDiskStat.getAggBytesWrite() - currentAccountedBytesWrite; if (ioRead < 0) { - s_logger.warn("Calculated negative value for io read: " + ioRead + ", vm disk stats say: " + vmDiskStat.getAggIORead() + ", previous vm disk usage was: " + + logger.warn("Calculated negative value for io read: " + ioRead + ", vm disk stats say: " + vmDiskStat.getAggIORead() + ", previous vm disk usage was: " + currentAccountedIORead); ioRead = 0; } if (ioWrite < 0) { - s_logger.warn("Calculated negative value for io write: " + ioWrite + ", vm disk stats say: " + vmDiskStat.getAggIOWrite() + ", previous vm disk usage was: " + + logger.warn("Calculated negative value for io write: " + ioWrite + ", vm disk stats say: " + vmDiskStat.getAggIOWrite() + ", previous vm disk usage was: " + currentAccountedIOWrite); ioWrite 
= 0; } if (bytesRead < 0) { - s_logger.warn("Calculated negative value for bytes read: " + bytesRead + ", vm disk stats say: " + vmDiskStat.getAggBytesRead() + + logger.warn("Calculated negative value for bytes read: " + bytesRead + ", vm disk stats say: " + vmDiskStat.getAggBytesRead() + ", previous vm disk usage was: " + currentAccountedBytesRead); bytesRead = 0; } if (bytesWrite < 0) { - s_logger.warn("Calculated negative value for bytes write: " + bytesWrite + ", vm disk stats say: " + vmDiskStat.getAggBytesWrite() + + logger.warn("Calculated negative value for bytes write: " + bytesWrite + ", vm disk stats say: " + vmDiskStat.getAggBytesWrite() + ", previous vm disk usage was: " + currentAccountedBytesWrite); bytesWrite = 0; } @@ -1311,8 +1309,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna UsageVmDiskVO usageVmDiskVO = new UsageVmDiskVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmId, vmDiskStat.getVolumeId(), ioRead, ioWrite, vmDiskStat.getAggIORead(), vmDiskStat.getAggIOWrite(), bytesRead, bytesWrite, vmDiskStat.getAggBytesRead(), vmDiskStat.getAggBytesWrite(), timestamp); - if (s_logger.isDebugEnabled()) { - s_logger.debug("creating vmDiskHelperEntry... accountId: " + vmDiskStat.getAccountId() + " in zone: " + vmDiskStat.getDataCenterId() + "; aiw: " + + if (logger.isDebugEnabled()) { + logger.debug("creating vmDiskHelperEntry... 
accountId: " + vmDiskStat.getAccountId() + " in zone: " + vmDiskStat.getDataCenterId() + "; aiw: " + vmDiskStat.getAggIOWrite() + "; air: " + vmDiskStat.getAggIORead() + "; curAIR: " + currentAccountedIORead + "; curAIW: " + currentAccountedIOWrite + "; uir: " + ioRead + "; uiw: " + ioWrite + "; abw: " + vmDiskStat.getAggBytesWrite() + "; abr: " + vmDiskStat.getAggBytesRead() + "; curABR: " + currentAccountedBytesRead + "; curABW: " + currentAccountedBytesWrite + "; ubr: " + bytesRead + "; ubw: " + bytesWrite); @@ -1325,8 +1323,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna String ipAddress = event.getResourceName(); if (EventTypes.EVENT_NET_IP_ASSIGN.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("assigning ip address: " + ipAddress + " to account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("assigning ip address: " + ipAddress + " to account: " + event.getAccountId()); } Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); long zoneId = event.getZoneId(); @@ -1344,12 +1342,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("released", SearchCriteria.Op.NULL); List ipAddressVOs = _usageIPAddressDao.search(sc, null); if (ipAddressVOs.size() > 1) { - s_logger.warn("More that one usage entry for ip address: " + ipAddress + " assigned to account: " + event.getAccountId() + + logger.warn("More that one usage entry for ip address: " + ipAddress + " assigned to account: " + event.getAccountId() + "; marking them all as released..."); } for (UsageIPAddressVO ipAddressVO : ipAddressVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("releasing ip address: " + ipAddressVO.getAddress() + " from account: " + ipAddressVO.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("releasing ip address: " + ipAddressVO.getAddress() + " from account: " + ipAddressVO.getAccountId()); } 
ipAddressVO.setReleased(event.getCreateDate()); // there really shouldn't be more than one _usageIPAddressDao.update(ipAddressVO); @@ -1369,18 +1367,18 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna List volumesVOs = _usageVolumeDao.search(sc, null); if (volumesVOs.size() > 0) { //This is a safeguard to avoid double counting of volumes. - s_logger.error("Found duplicate usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking as deleted..."); + logger.error("Found duplicate usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking as deleted..."); } //an entry exists if it is a resize volume event. marking the existing deleted and creating a new one in the case of resize. for (UsageVolumeVO volumesVO : volumesVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); } volumesVO.setDeleted(event.getCreateDate()); _usageVolumeDao.update(volumesVO); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("create volume with id : " + volId + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("create volume with id : " + volId + " for account: " + event.getAccountId()); } Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); UsageVolumeVO volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), event.getSize(), event.getCreateDate(), null); @@ -1392,11 +1390,11 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("deleted", SearchCriteria.Op.NULL); List volumesVOs = _usageVolumeDao.search(sc, null); if (volumesVOs.size() > 1) { - s_logger.warn("More 
that one usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + logger.warn("More that one usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsageVolumeVO volumesVO : volumesVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); } volumesVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usageVolumeDao.update(volumesVO); @@ -1415,22 +1413,22 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna if (EventTypes.EVENT_TEMPLATE_CREATE.equals(event.getType()) || EventTypes.EVENT_TEMPLATE_COPY.equals(event.getType())) { templateSize = event.getSize(); if (templateSize < 1) { - s_logger.error("Incorrect size for template with Id " + templateId); + logger.error("Incorrect size for template with Id " + templateId); return; } if (zoneId == -1L) { - s_logger.error("Incorrect zoneId for template with Id " + templateId); + logger.error("Incorrect zoneId for template with Id " + templateId); return; } } if (EventTypes.EVENT_TEMPLATE_CREATE.equals(event.getType()) || EventTypes.EVENT_TEMPLATE_COPY.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("create template with id : " + templateId + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("create template with id : " + templateId + " for account: " + event.getAccountId()); } List storageVOs = _usageStorageDao.listByIdAndZone(event.getAccountId(), templateId, StorageTypes.TEMPLATE, zoneId); if (storageVOs.size() > 0) { - s_logger.warn("Usage entry for Template: " + templateId + " assigned to account: " + event.getAccountId() + 
"already exists in zone " + zoneId); + logger.warn("Usage entry for Template: " + templateId + " assigned to account: " + event.getAccountId() + " already exists in zone " + zoneId); return; } Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); @@ -1446,12 +1444,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna storageVOs = _usageStorageDao.listById(event.getAccountId(), templateId, StorageTypes.TEMPLATE); } if (storageVOs.size() > 1) { - s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + event.getAccountId() + + logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsageStorageVO storageVO : storageVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); } storageVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usageStorageDao.update(storageVO); @@ -1469,12 +1467,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } if (EventTypes.EVENT_ISO_CREATE.equals(event.getType()) || EventTypes.EVENT_ISO_COPY.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("create iso with id : " + isoId + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("create iso with id : " + isoId + " for account: " + event.getAccountId()); } List storageVOs = _usageStorageDao.listByIdAndZone(event.getAccountId(), isoId, StorageTypes.ISO, zoneId); if (storageVOs.size() > 0) { - s_logger.warn("Usage entry for ISO: " + isoId + " assigned to account: " + event.getAccountId() + "already exists in zone " + zoneId); +
logger.warn("Usage entry for ISO: " + isoId + " assigned to account: " + event.getAccountId() + " already exists in zone " + zoneId); return; } Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); @@ -1490,11 +1488,11 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } if (storageVOs.size() > 1) { - s_logger.warn("More that one usage entry for storage: " + isoId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + logger.warn("More that one usage entry for storage: " + isoId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsageStorageVO storageVO : storageVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting iso: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting iso: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); } storageVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usageStorageDao.update(storageVO); @@ -1513,8 +1511,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } if (EventTypes.EVENT_SNAPSHOT_CREATE.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("create snapshot with id : " + snapId + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("create snapshot with id : " + snapId + " for account: " + event.getAccountId()); } Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); UsageStorageVO storageVO = @@ -1523,11 +1521,11 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } else if (EventTypes.EVENT_SNAPSHOT_DELETE.equals(event.getType())) { List storageVOs = _usageStorageDao.listById(event.getAccountId(), snapId, StorageTypes.SNAPSHOT); if (storageVOs.size() > 1) { - s_logger.warn("More that one usage entry for
storage: " + snapId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + logger.warn("More that one usage entry for storage: " + snapId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsageStorageVO storageVO : storageVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting snapshot: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting snapshot: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); } storageVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usageStorageDao.update(storageVO); @@ -1542,8 +1540,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna long id = event.getResourceId(); if (EventTypes.EVENT_LOAD_BALANCER_CREATE.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating load balancer : " + id + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("Creating load balancer : " + id + " for account: " + event.getAccountId()); } zoneId = event.getZoneId(); Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); @@ -1556,12 +1554,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("deleted", SearchCriteria.Op.NULL); List lbVOs = _usageLoadBalancerPolicyDao.search(sc, null); if (lbVOs.size() > 1) { - s_logger.warn("More that one usage entry for load balancer policy: " + id + " assigned to account: " + event.getAccountId() + + logger.warn("More that one usage entry for load balancer policy: " + id + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsageLoadBalancerPolicyVO lbVO : lbVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting load balancer policy: " + lbVO.getId() + " from account: " + 
lbVO.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting load balancer policy: " + lbVO.getId() + " from account: " + lbVO.getAccountId()); } lbVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usageLoadBalancerPolicyDao.update(lbVO); @@ -1576,8 +1574,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna long id = event.getResourceId(); if (EventTypes.EVENT_NET_RULE_ADD.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating port forwarding rule : " + id + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("Creating port forwarding rule : " + id + " for account: " + event.getAccountId()); } zoneId = event.getZoneId(); Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); @@ -1590,12 +1588,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("deleted", SearchCriteria.Op.NULL); List pfVOs = _usagePortForwardingRuleDao.search(sc, null); if (pfVOs.size() > 1) { - s_logger.warn("More that one usage entry for port forwarding rule: " + id + " assigned to account: " + event.getAccountId() + + logger.warn("More that one usage entry for port forwarding rule: " + id + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsagePortForwardingRuleVO pfVO : pfVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting port forwarding rule: " + pfVO.getId() + " from account: " + pfVO.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting port forwarding rule: " + pfVO.getId() + " from account: " + pfVO.getAccountId()); } pfVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usagePortForwardingRuleDao.update(pfVO); @@ -1613,12 +1611,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna try { nicId = 
Long.parseLong(event.getResourceName()); } catch (Exception e) { - s_logger.warn("failed to get nic id from resource name, resource name is: " + event.getResourceName()); + logger.warn("failed to get nic id from resource name, resource name is: " + event.getResourceName()); } if (EventTypes.EVENT_NETWORK_OFFERING_CREATE.equals(event.getType()) || EventTypes.EVENT_NETWORK_OFFERING_ASSIGN.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating networking offering: " + networkOfferingId + " for Vm: " + vmId + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("Creating networking offering: " + networkOfferingId + " for Vm: " + vmId + " for account: " + event.getAccountId()); } zoneId = event.getZoneId(); Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); @@ -1635,12 +1633,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("deleted", SearchCriteria.Op.NULL); List noVOs = _usageNetworkOfferingDao.search(sc, null); if (noVOs.size() > 1) { - s_logger.warn("More that one usage entry for networking offering: " + networkOfferingId + " for Vm: " + vmId + " assigned to account: " + + logger.warn("More that one usage entry for networking offering: " + networkOfferingId + " for Vm: " + vmId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsageNetworkOfferingVO noVO : noVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting network offering: " + noVO.getNetworkOfferingId() + " from Vm: " + noVO.getVmInstanceId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting network offering: " + noVO.getNetworkOfferingId() + " from Vm: " + noVO.getVmInstanceId()); } noVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usageNetworkOfferingDao.update(noVO); @@ -1655,8 +1653,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, 
Runna long userId = event.getResourceId(); if (EventTypes.EVENT_VPN_USER_ADD.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating VPN user: " + userId + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("Creating VPN user: " + userId + " for account: " + event.getAccountId()); } Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); String userName = event.getResourceName(); @@ -1669,11 +1667,11 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("deleted", SearchCriteria.Op.NULL); List vuVOs = _usageVPNUserDao.search(sc, null); if (vuVOs.size() > 1) { - s_logger.warn("More that one usage entry for vpn user: " + userId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + logger.warn("More that one usage entry for vpn user: " + userId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsageVPNUserVO vuVO : vuVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting vpn user: " + vuVO.getUserId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting vpn user: " + vuVO.getUserId()); } vuVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usageVPNUserDao.update(vuVO); @@ -1689,8 +1687,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna long sgId = event.getOfferingId(); if (EventTypes.EVENT_SECURITY_GROUP_ASSIGN.equals(event.getType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assigning : security group" + sgId + " to Vm: " + vmId + " for account: " + event.getAccountId()); + if (logger.isDebugEnabled()) { + logger.debug("Assigning : security group" + sgId + " to Vm: " + vmId + " for account: " + event.getAccountId()); } zoneId = event.getZoneId(); Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); @@ -1704,12 +1702,12 @@ public class 
UsageManagerImpl extends ManagerBase implements UsageManager, Runna sc.addAnd("deleted", SearchCriteria.Op.NULL); List sgVOs = _usageSecurityGroupDao.search(sc, null); if (sgVOs.size() > 1) { - s_logger.warn("More that one usage entry for security group: " + sgId + " for Vm: " + vmId + " assigned to account: " + event.getAccountId() + + logger.warn("More that one usage entry for security group: " + sgId + " for Vm: " + vmId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); } for (UsageSecurityGroupVO sgVO : sgVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("deleting security group: " + sgVO.getSecurityGroupId() + " from Vm: " + sgVO.getVmInstanceId()); + if (logger.isDebugEnabled()) { + logger.debug("deleting security group: " + sgVO.getSecurityGroupId() + " from Vm: " + sgVO.getVmInstanceId()); } sgVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one _usageSecurityGroupDao.update(sgVO); @@ -1738,8 +1736,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try { if (!_heartbeatLock.lock(3)) { // 3 second timeout - if (s_logger.isTraceEnabled()) - s_logger.trace("Heartbeat lock is in use by others, returning true as someone else will take over the job if required"); + if (logger.isTraceEnabled()) + logger.trace("Heartbeat lock is in use by others, returning true as someone else will take over the job if required"); return; } @@ -1769,8 +1767,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna if ((timeSinceJob > 0) && (timeSinceJob > (aggregationDurationMillis - 100))) { if (timeToJob > (aggregationDurationMillis / 2)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("it's been " + timeSinceJob + " ms since last usage job and " + timeToJob + + if (logger.isDebugEnabled()) { + logger.debug("it's been " + timeSinceJob + " ms since last usage 
job and " + timeToJob + " ms until next job, scheduling an immediate job to catch up (aggregation duration is " + _aggregationDuration + " minutes)"); } scheduleParse(); @@ -1786,7 +1784,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna _heartbeatLock.unlock(); } } catch (Exception ex) { - s_logger.error("error in heartbeat", ex); + logger.error("error in heartbeat", ex); } finally { usageTxn.close(); } @@ -1821,7 +1819,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna txn.commit(); } catch (Exception dbEx) { txn.rollback(); - s_logger.error("error updating usage job", dbEx); + logger.error("error updating usage job", dbEx); } return changeOwner; } @@ -1852,7 +1850,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna _alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SANITY_RESULT, 0, 0); } } catch (SQLException e) { - s_logger.error("Error in sanity check", e); + logger.error("Error in sanity check", e); } } } diff --git a/utils/src/main/java/com/cloud/utils/component/ComponentLifecycleBase.java b/utils/src/main/java/com/cloud/utils/component/ComponentLifecycleBase.java index 829dc9b9e84..c39afc307a0 100644 --- a/utils/src/main/java/com/cloud/utils/component/ComponentLifecycleBase.java +++ b/utils/src/main/java/com/cloud/utils/component/ComponentLifecycleBase.java @@ -27,7 +27,7 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; public class ComponentLifecycleBase implements ComponentLifecycle { - private static final Logger s_logger = Logger.getLogger(ComponentLifecycleBase.class); + protected Logger logger = Logger.getLogger(getClass()); protected String _name; protected int _runLevel; diff --git a/utils/src/main/java/org/apache/cloudstack/utils/identity/ManagementServerNode.java b/utils/src/main/java/org/apache/cloudstack/utils/identity/ManagementServerNode.java index fdd80f7aa65..cce18759b67 100644 --- 
a/utils/src/main/java/org/apache/cloudstack/utils/identity/ManagementServerNode.java +++ b/utils/src/main/java/org/apache/cloudstack/utils/identity/ManagementServerNode.java @@ -21,7 +21,6 @@ package org.apache.cloudstack.utils.identity; import javax.ejb.Local; -import org.apache.log4j.Logger; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.ComponentLifecycle; @@ -31,7 +30,6 @@ import com.cloud.utils.net.MacAddress; @Local(value = {SystemIntegrityChecker.class}) public class ManagementServerNode extends AdapterBase implements SystemIntegrityChecker { - private static final Logger s_logger = Logger.getLogger(ManagementServerNode.class); private static final long s_nodeId = MacAddress.getMacAddress().toLong(); @@ -55,7 +53,7 @@ public class ManagementServerNode extends AdapterBase implements SystemIntegrity try { check(); } catch (Exception e) { - s_logger.error("System integrity check exception", e); + logger.error("System integrity check exception", e); System.exit(1); } return true;