Improve logging to include more identifiable information (#9873)

* Improve logging to include more identifiable information for KVM plugin

* Update logging for ScaleIO plugin

* Improve logging to include more identifiable information for default volume storage plugin

* Improve logging to include more identifiable information for agent managers

* Improve logging to include more identifiable information for Listeners

* Replace IDs with objects or UUIDs


* Improve logging to include more identifiable information for engine

* Improve logging to include more identifiable information for server

* Fixups in engine

* Improve logging to include more identifiable information for plugins

* Improve logging to include more identifiable information for Cmd classes

* Fix toString method for StorageFilerTO.java
Vishesh 2025-01-06 16:42:37 +05:30 committed by GitHub
parent cfafcaeb01
commit a4224e58cc
468 changed files with 5472 additions and 4706 deletions
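
The pattern applied across these files is the same everywhere: instead of logging a bare numeric id, log the entity itself (or its id, uuid and name together) so a log line can be matched to a concrete host, network or volume without a database lookup. A minimal before/after sketch of that pattern, taken from the AgentManagerImpl hunk below; the logger and host variables are whatever is in scope at the call site, and the richer identity comes from the entity's toString():

    // before: only the internal database id appears in the log
    logger.debug("Checking if agent ({}) is alive", hostId);
    // after: the entity is passed, so its toString() contributes id, uuid and name
    logger.debug("Checking if agent ({}) is alive", host);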

View File

@ -132,6 +132,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
ServerResource _resource;
Link _link;
Long _id;
String _uuid;
String _name;
Timer _timer = new Timer("Agent Timer");
Timer certTimer;
@ -182,8 +184,10 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
resource.setAgentControl(this);
final String value = _shell.getPersistentProperty(getResourceName(), "id");
_uuid = _shell.getPersistentProperty(getResourceName(), "uuid");
_name = _shell.getPersistentProperty(getResourceName(), "name");
_id = value != null ? Long.parseLong(value) : null;
logger.info("id is {}", ObjectUtils.defaultIfNull(_id, ""));
logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), _uuid, _name);
final Map<String, Object> params = new HashMap<>();
@ -212,8 +216,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(
"agentRequest-Handler"));
logger.info("Agent [id = {} : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", ObjectUtils.defaultIfNull(_id, "new"), getResourceName(),
_shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort());
logger.info("Agent [id = {}, uuid: {}, name: {}] : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}",
ObjectUtils.defaultIfNull(_id, "new"), _uuid, _name, getResourceName(),
_shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort());
}
public String getVersion() {
@ -377,11 +382,28 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
}
public void setId(final Long id) {
logger.debug("Set agent id {}", id);
_id = id;
_shell.setPersistentProperty(getResourceName(), "id", Long.toString(id));
}
public String getUuid() {
return _uuid;
}
public void setUuid(String uuid) {
this._uuid = uuid;
_shell.setPersistentProperty(getResourceName(), "uuid", uuid);
}
public String getName() {
return _name;
}
public void setName(String name) {
this._name = name;
_shell.setPersistentProperty(getResourceName(), "name", name);
}
private synchronized void scheduleServicesRestartTask() {
if (certTimer != null) {
certTimer.cancel();
@ -594,9 +616,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
return;
}
logger.info("Process agent startup answer, agent id = {}", startup.getHostId());
logger.info("Process agent startup answer, agent [id: {}, uuid: {}, name: {}] connected to the server",
startup.getHostId(), startup.getHostUuid(), startup.getHostName());
setId(startup.getHostId());
setUuid(startup.getHostUuid());
setName(startup.getHostName());
_pingInterval = (long)startup.getPingInterval() * 1000; // change to ms.
setLastPingResponseTime();
@ -604,7 +629,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
_ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS);
logger.info("Startup Response Received: agent id = {}", getId());
logger.info("Startup Response Received: agent [id: {}, uuid: {}, name: {}]",
startup.getHostId(), startup.getHostUuid(), startup.getHostName());
}
protected void processRequest(final Request request, final Link link) {
@ -860,15 +886,17 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
NumbersUtil.enableHumanReadableSizes = humanReadable;
}
logger.info("Processing agent ready command, agent id = {}", ready.getHostId());
logger.info("Processing agent ready command, agent id = {}, uuid = {}, name = {}", ready.getHostId(), ready.getHostUuid(), ready.getHostName());
if (ready.getHostId() != null) {
setId(ready.getHostId());
setUuid(ready.getHostUuid());
setName(ready.getHostName());
}
verifyAgentArch(ready.getArch());
processManagementServerList(ready.getMsHostList(), ready.getLbAlgorithm(), ready.getLbCheckInterval());
logger.info("Ready command is processed for agent id = {}", getId());
logger.info("Ready command is processed for agent [id: {}, uuid: {}, name: {}]", getId(), getUuid(), getName());
}
private void verifyAgentArch(String arch) {

View File

@ -374,13 +374,15 @@ public class LoadBalancerTO {
public static class CounterTO implements Serializable {
private static final long serialVersionUID = 2L;
private final Long id;
private final String uuid;
private final String name;
private final Counter.Source source;
private final String value;
private final String provider;
public CounterTO(Long id, String name, Counter.Source source, String value, String provider) {
public CounterTO(Long id, String uuid, String name, Counter.Source source, String value, String provider) {
this.id = id;
this.uuid = uuid;
this.name = name;
this.source = source;
this.value = value;
@ -391,6 +393,10 @@ public class LoadBalancerTO {
return id;
}
public String getUuid() {
return uuid;
}
public String getName() {
return name;
}
@ -411,12 +417,14 @@ public class LoadBalancerTO {
public static class ConditionTO implements Serializable {
private static final long serialVersionUID = 2L;
private final Long id;
private final String uuid;
private final long threshold;
private final Condition.Operator relationalOperator;
private final CounterTO counter;
public ConditionTO(Long id, long threshold, Condition.Operator relationalOperator, CounterTO counter) {
public ConditionTO(Long id, String uuid, long threshold, Condition.Operator relationalOperator, CounterTO counter) {
this.id = id;
this.uuid = uuid;
this.threshold = threshold;
this.relationalOperator = relationalOperator;
this.counter = counter;
@ -426,6 +434,10 @@ public class LoadBalancerTO {
return id;
}
public String getUuid() {
return uuid;
}
public long getThreshold() {
return threshold;
}
@ -442,6 +454,7 @@ public class LoadBalancerTO {
public static class AutoScalePolicyTO implements Serializable {
private static final long serialVersionUID = 2L;
private final long id;
private final String uuid;
private final int duration;
private final int quietTime;
private final Date lastQuietTime;
@ -449,8 +462,9 @@ public class LoadBalancerTO {
boolean revoked;
private final List<ConditionTO> conditions;
public AutoScalePolicyTO(long id, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List<ConditionTO> conditions, boolean revoked) {
public AutoScalePolicyTO(long id, String uuid, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List<ConditionTO> conditions, boolean revoked) {
this.id = id;
this.uuid = uuid;
this.duration = duration;
this.quietTime = quietTime;
this.lastQuietTime = lastQuietTime;
@ -463,6 +477,10 @@ public class LoadBalancerTO {
return id;
}
public String getUuid() {
return uuid;
}
public int getDuration() {
return duration;
}

View File

@ -17,6 +17,7 @@
package com.cloud.agent.api.to;
import com.cloud.storage.DataStoreRole;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class NfsTO implements DataStoreTO {
@ -41,6 +42,13 @@ public class NfsTO implements DataStoreTO {
}
@Override
public String toString() {
return String.format("NfsTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "uuid", "_url", "_role", "nfsVersion"));
}
@Override
public String getUrl() {
return _url;

View File

@ -22,6 +22,7 @@ import com.cloud.agent.api.LogLevel;
import com.cloud.agent.api.LogLevel.Log4jLevel;
import com.cloud.storage.DataStoreRole;
import com.cloud.utils.storage.S3.ClientOptions;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public final class S3TO implements ClientOptions, DataStoreTO {
@ -68,6 +69,13 @@ public final class S3TO implements ClientOptions, DataStoreTO {
}
@Override
public String toString() {
return String.format("S3TO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "bucketName"));
}
public Long getId() {
return this.id;
}

View File

@ -19,6 +19,7 @@ package com.cloud.agent.api.to;
import com.cloud.agent.api.LogLevel;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StoragePool;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class StorageFilerTO {
long id;
@ -73,6 +74,6 @@ public class StorageFilerTO {
@Override
public String toString() {
return new StringBuilder("Pool[").append(id).append("|").append(host).append(":").append(port).append("|").append(path).append("]").toString();
return String.format("Pool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "host", "port", "path"));
}
}

View File

@ -18,6 +18,7 @@ package com.cloud.agent.api.to;
import com.cloud.storage.DataStoreRole;
import com.cloud.utils.SwiftUtil;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {
Long id;
@ -41,6 +42,13 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {
this.storagePolicy = storagePolicy;
}
@Override
public String toString() {
return String.format("SwiftTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "account", "userName"));
}
public Long getId() {
return id;
}

View File

@ -58,7 +58,7 @@ public interface Ipv6Service extends PluggableService, Configurable {
Pair<Integer, Integer> getUsedTotalIpv6SubnetForZone(long zoneId);
Pair<String, String> preAllocateIpv6SubnetForNetwork(long zoneId) throws ResourceAllocationException;
Pair<String, String> preAllocateIpv6SubnetForNetwork(DataCenter zone) throws ResourceAllocationException;
void assignIpv6SubnetToNetwork(String subnet, long networkId);

View File

@ -22,6 +22,7 @@ import java.util.Date;
import com.cloud.network.Networks.BroadcastDomainType;
import com.cloud.network.Networks.Mode;
import com.cloud.network.Networks.TrafficType;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class NetworkProfile implements Network {
private final long id;
@ -384,4 +385,11 @@ public class NetworkProfile implements Network {
return networkCidrSize;
}
@Override
public String toString() {
return String.format("NetworkProfile %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "networkOfferingId"));
}
}

View File

@ -63,6 +63,10 @@ public class LoadBalancingRule {
return lb.getId();
}
public LoadBalancer getLb() {
return lb;
}
public String getName() {
return lb.getName();
}

View File

@ -39,7 +39,7 @@ public interface RemoteAccessVpnService {
VpnUser addVpnUser(long vpnOwnerId, String userName, String password);
boolean removeVpnUser(long vpnOwnerId, String userName, Account caller);
boolean removeVpnUser(Account vpnOwner, String userName, Account caller);
List<? extends VpnUser> listVpnUsers(long vpnOwnerId, String userName);

View File

@ -19,6 +19,7 @@ package com.cloud.region.ha;
import java.util.List;
import com.cloud.user.Account;
import org.apache.cloudstack.api.command.user.region.ha.gslb.AssignToGlobalLoadBalancerRuleCmd;
import org.apache.cloudstack.api.command.user.region.ha.gslb.CreateGlobalLoadBalancerRuleCmd;
import org.apache.cloudstack.api.command.user.region.ha.gslb.DeleteGlobalLoadBalancerRuleCmd;
@ -39,7 +40,7 @@ public interface GlobalLoadBalancingRulesService {
GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalancerRuleCmd updateGslbCmd);
boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) throws com.cloud.exception.ResourceUnavailableException;
boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, Account account) throws com.cloud.exception.ResourceUnavailableException;
/*
* methods for managing sites participating in global load balancing

View File

@ -450,6 +450,9 @@ public class NicProfile implements InternalIdentity, Serializable {
@Override
public String toString() {
return String.format("NicProfile %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "vmId", "deviceId", "broadcastUri", "reservationId", "iPv4Address"));
return String.format("NicProfile %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "vmId", "deviceId",
"broadcastUri", "reservationId", "iPv4Address"));
}
}

View File

@ -125,8 +125,9 @@ public class UpdateHostCmd extends BaseCmd {
hostResponse.setResponseName(getCommandName());
this.setResponseObject(hostResponse);
} catch (Exception e) {
logger.debug("Failed to update host:" + getId(), e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update host:" + getId() + "," + e.getMessage());
Host host = _entityMgr.findById(Host.class, getId());
logger.debug("Failed to update host: {} with id {}", host, getId(), e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to update host: %s with id %d, %s", host, getId(), e.getMessage()));
}
}
}

View File

@ -124,10 +124,10 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd {
if (account.getType() == Account.Type.PROJECT) {
Project project = _projectService.findByProjectAccountId(vmsnapshot.getAccountId());
if (project == null) {
throw new InvalidParameterValueException("Unable to find project by account id=" + account.getUuid());
throw new InvalidParameterValueException(String.format("Unable to find project by account %s", account));
}
if (project.getState() != Project.State.Active) {
throw new PermissionDeniedException("Can't add resources to the project id=" + project.getUuid() + " in state=" + project.getState() + " as it's no longer active");
throw new PermissionDeniedException(String.format("Can't add resources to the project %s in state=%s as it's no longer active", project, project.getState()));
}
} else if (account.getState() == Account.State.DISABLED) {
throw new PermissionDeniedException("The owner of template is disabled: " + account);
@ -164,8 +164,9 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd {
@Override
public void execute() {
logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot id:" + getVMSnapshotId() + " and snapshot id:" + getEntityId() + " starts:" + System.currentTimeMillis());
CallContext.current().setEventDetails("Vm Snapshot Id: "+ this._uuidMgr.getUuid(VMSnapshot.class, getVMSnapshotId()));
VMSnapshot vmSnapshot = _vmSnapshotService.getVMSnapshotById(getVMSnapshotId());
logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot {} with id {} and snapshot [id: {}, uuid: {}]", vmSnapshot, getVMSnapshotId(), getEntityId(), getEntityUuid());
CallContext.current().setEventDetails("Vm Snapshot Id: " + vmSnapshot.getUuid());
Snapshot snapshot = null;
try {
snapshot = _snapshotService.backupSnapshotFromVmSnapshot(getEntityId(), getVmId(), getVolumeId(), getVMSnapshotId());
@ -174,19 +175,19 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd {
response.setResponseName(getCommandName());
this.setResponseObject(response);
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId());
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to create snapshot due to an internal error creating snapshot from vm snapshot %s", vmSnapshot));
}
} catch (InvalidParameterValueException ex) {
throw ex;
} catch (Exception e) {
logger.debug("Failed to create snapshot", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId());
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to create snapshot due to an internal error creating snapshot from vm snapshot %s", vmSnapshot));
} finally {
if (snapshot == null) {
try {
_snapshotService.deleteSnapshot(getEntityId(), null);
} catch (Exception e) {
logger.debug("Failed to clean failed snapshot" + getEntityId());
logger.debug("Failed to clean failed snapshot {} with id {}", () -> _entityMgr.findById(Snapshot.class, getEntityId()), this::getEntityId);
}
}
}

View File

@ -104,7 +104,7 @@ public class RemoveVpnUserCmd extends BaseAsyncCmd {
public void execute() {
Account owner = _accountService.getAccount(getEntityOwnerId());
long ownerId = owner.getId();
boolean result = _ravService.removeVpnUser(ownerId, userName, CallContext.current().getCallingAccount());
boolean result = _ravService.removeVpnUser(owner, userName, CallContext.current().getCallingAccount());
if (!result) {
String errorMessage = String.format("Failed to remove VPN user=[%s]. VPN owner id=[%s].", userName, ownerId);
logger.error(errorMessage);

View File

@ -21,6 +21,7 @@ package org.apache.cloudstack.cluster;
import com.cloud.host.Host;
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.component.Adapter;
@ -55,8 +56,8 @@ public interface ClusterDrsAlgorithm extends Adapter {
* @throws ConfigurationException
* if there is an error in the configuration
*/
boolean needsDrs(long clusterId, List<Ternary<Long, Long, Long>> cpuList,
List<Ternary<Long, Long, Long>> memoryList) throws ConfigurationException;
boolean needsDrs(Cluster cluster, List<Ternary<Long, Long, Long>> cpuList,
List<Ternary<Long, Long, Long>> memoryList) throws ConfigurationException;
/**
@ -79,7 +80,7 @@ public interface ClusterDrsAlgorithm extends Adapter {
*
* @return a ternary containing improvement, cost, benefit
*/
Ternary<Double, Double, Double> getMetrics(long clusterId, VirtualMachine vm, ServiceOffering serviceOffering,
Ternary<Double, Double, Double> getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering,
Host destHost, Map<Long, Ternary<Long, Long, Long>> hostCpuMap,
Map<Long, Ternary<Long, Long, Long>> hostMemoryMap,
Boolean requiresStorageMotion) throws ConfigurationException;

View File

@ -17,7 +17,7 @@
package org.apache.cloudstack.vm;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import java.util.List;
@ -179,6 +179,13 @@ public class UnmanagedInstanceTO {
this.vncPassword = vncPassword;
}
@Override
public String toString() {
return String.format("UnmanagedInstanceTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "name", "internalCSName", "hostName", "clusterName"));
}
public static class Disk {
private String diskId;
@ -322,12 +329,9 @@ public class UnmanagedInstanceTO {
@Override
public String toString() {
return "Disk {" +
"diskId='" + diskId + '\'' +
", capacity=" + toHumanReadableSize(capacity) +
", controller='" + controller + '\'' +
", controllerUnit=" + controllerUnit +
"}";
return String.format("Disk %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "diskId", "internalCSName", "controller", "controllerUnit"));
}
}
@ -424,11 +428,9 @@ public class UnmanagedInstanceTO {
@Override
public String toString() {
return "Nic{" +
"nicId='" + nicId + '\'' +
", adapterType='" + adapterType + '\'' +
", macAddress='" + macAddress + '\'' +
"}";
return String.format("Nic %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "nicId", "adapterType", "macAddress"));
}
}
}

View File

@ -41,16 +41,19 @@ public class LoadBalancerTOTest {
LoadBalancerTO.AutoScaleVmGroupTO vmGroup;
private static final Long counterId = 1L;
private static final String counterUuid = "1111-1111-1100";
private static final String counterName = "counter name";
private static final Counter.Source counterSource = Counter.Source.CPU;
private static final String counterValue = "counter value";
private static final String counterProvider = "VIRTUALROUTER";
private static final Long conditionId = 2L;
private static final String conditionUuid = "1111-1111-1110";
private static final Long threshold = 100L;
private static final Condition.Operator relationalOperator = Condition.Operator.GT;
private static final Long scaleUpPolicyId = 11L;
private static final String scaleUpPolicyUuid = "1111-1111-1111";
private static final int scaleUpPolicyDuration = 61;
private static final int scaleUpPolicyQuietTime = 31;
private static final Date scaleUpPolicyLastQuietTime = new Date();
@ -85,14 +88,14 @@ public class LoadBalancerTOTest {
@Before
public void setUp() {
counter = new LoadBalancerTO.CounterTO(counterId, counterName, counterSource, counterValue, counterProvider);
condition = new LoadBalancerTO.ConditionTO(conditionId, threshold, relationalOperator, counter);
scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyDuration, scaleUpPolicyQuietTime,
scaleUpPolicyLastQuietTime, AutoScalePolicy.Action.SCALEUP,
Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false);
scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleDownPolicyDuration, scaleDownPolicyQuietTime,
scaleDownPolicyLastQuietTime, AutoScalePolicy.Action.SCALEDOWN,
Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false);
counter = new LoadBalancerTO.CounterTO(counterId, counterUuid, counterName, counterSource, counterValue, counterProvider);
condition = new LoadBalancerTO.ConditionTO(conditionId, conditionUuid, threshold, relationalOperator, counter);
scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyUuid, scaleUpPolicyDuration,
scaleUpPolicyQuietTime, scaleUpPolicyLastQuietTime,
AutoScalePolicy.Action.SCALEUP, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false);
scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleUpPolicyUuid, scaleDownPolicyDuration,
scaleDownPolicyQuietTime, scaleDownPolicyLastQuietTime,
AutoScalePolicy.Action.SCALEDOWN, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false);
vmProfile = new LoadBalancerTO.AutoScaleVmProfileTO(zoneId, domainId, cloudStackApiUrl, autoScaleUserApiKey,
autoScaleUserSecretKey, serviceOfferingId, templateId, vmName, networkId, otherDeployParams,
counterParamList, expungeVmGracePeriod);
@ -113,6 +116,7 @@ public class LoadBalancerTOTest {
@Test
public void testConditionTO() {
Assert.assertEquals(conditionId, condition.getId());
Assert.assertEquals(conditionUuid, condition.getUuid());
Assert.assertEquals((long) threshold, condition.getThreshold());
Assert.assertEquals(relationalOperator, condition.getRelationalOperator());
Assert.assertEquals(counter, condition.getCounter());

View File

@ -19,6 +19,8 @@
package com.cloud.agent.api;
import com.cloud.host.Host;
import java.util.List;
public class ReadyCommand extends Command {
@ -30,6 +32,8 @@ public class ReadyCommand extends Command {
private Long dcId;
private Long hostId;
private String hostUuid;
private String hostName;
private List<String> msHostList;
private String lbAlgorithm;
private Long lbCheckInterval;
@ -41,9 +45,11 @@ public class ReadyCommand extends Command {
this.dcId = dcId;
}
public ReadyCommand(final Long dcId, final Long hostId, boolean enableHumanReadableSizes) {
this(dcId);
this.hostId = hostId;
public ReadyCommand(final Host host, boolean enableHumanReadableSizes) {
this(host.getDataCenterId());
this.hostId = host.getId();
this.hostUuid = host.getUuid();
this.hostName = host.getName();
this.enableHumanReadableSizes = enableHumanReadableSizes;
}
@ -68,6 +74,14 @@ public class ReadyCommand extends Command {
return hostId;
}
public String getHostUuid() {
return hostUuid;
}
public String getHostName() {
return hostName;
}
public List<String> getMsHostList() {
return msHostList;
}

View File

@ -21,14 +21,18 @@ package com.cloud.agent.api;
public class StartupAnswer extends Answer {
long hostId;
String hostName;
String hostUuid;
int pingInterval;
protected StartupAnswer() {
}
public StartupAnswer(StartupCommand cmd, long hostId, int pingInterval) {
public StartupAnswer(StartupCommand cmd, long hostId, String hostUuid, String hostName, int pingInterval) {
super(cmd);
this.hostId = hostId;
this.hostUuid = hostUuid;
this.hostName = hostName;
this.pingInterval = pingInterval;
}
@ -40,6 +44,14 @@ public class StartupAnswer extends Answer {
return hostId;
}
public String getHostUuid() {
return hostUuid;
}
public String getHostName() {
return hostName;
}
public int getPingInterval() {
return pingInterval;
}

View File

@ -23,6 +23,7 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreInfo;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.storage.DataStoreRole;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class ImageStoreTO implements DataStoreTO {
private String type;
@ -78,15 +79,9 @@ public class ImageStoreTO implements DataStoreTO {
@Override
public String toString() {
return new StringBuilder("ImageStoreTO[type=").append(type)
.append("|provider=")
.append(providerName)
.append("|role=")
.append(role)
.append("|uri=")
.append(uri)
.append("]")
.toString();
return String.format("ImageStoreTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "uuid", "type", "providerName", "role", "uri"));
}
@Override

View File

@ -26,6 +26,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage.StoragePoolType;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class PrimaryDataStoreTO implements DataStoreTO {
public static final String MANAGED = PrimaryDataStore.MANAGED;
@ -145,15 +146,9 @@ public class PrimaryDataStoreTO implements DataStoreTO {
@Override
public String toString() {
return new StringBuilder("PrimaryDataStoreTO[uuid=").append(uuid)
.append("|name=")
.append(name)
.append("|id=")
.append(id)
.append("|pooltype=")
.append(poolType)
.append("]")
.toString();
return String.format("PrimaryDataStoreTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "poolType"));
}
public Boolean isFullCloneFlag() {

View File

@ -27,6 +27,7 @@ import com.cloud.agent.api.to.DataTO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.template.VirtualMachineTemplate;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class TemplateObjectTO extends DownloadableObjectTO implements DataTO {
private String path;
@ -264,6 +265,8 @@ public class TemplateObjectTO extends DownloadableObjectTO implements DataTO {
@Override
public String toString() {
return new StringBuilder("TemplateTO[id=").append(id).append("|origUrl=").append(origUrl).append("|name").append(name).append("]").toString();
return String.format("TemplateTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "origUrl"));
}
}

View File

@ -30,6 +30,7 @@ import com.cloud.offering.DiskOffering.DiskCacheMode;
import com.cloud.storage.MigrationOptions;
import com.cloud.storage.Storage;
import com.cloud.storage.Volume;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import java.util.Arrays;
@ -258,7 +259,9 @@ public class VolumeObjectTO extends DownloadableObjectTO implements DataTO {
@Override
public String toString() {
return new StringBuilder("volumeTO[uuid=").append(uuid).append("|path=").append(path).append("|datastore=").append(dataStore).append("]").toString();
return String.format("volumeTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "path", "dataStore"));
}
public void setBytesReadRate(Long bytesReadRate) {

View File

@ -24,6 +24,8 @@ import com.cloud.agent.api.Command;
public interface EndPoint {
long getId();
String getUuid();
String getHostAddr();
String getPublicAddr();

View File

@ -19,12 +19,22 @@
package org.apache.cloudstack.engine.subsystem.api.storage;
import com.cloud.exception.StorageConflictException;
import com.cloud.host.Host;
import com.cloud.storage.StoragePool;
public interface HypervisorHostListener {
boolean hostAdded(long hostId);
default boolean hostConnect(Host host, StoragePool pool) throws StorageConflictException {
return hostConnect(host.getId(), pool.getId());
}
boolean hostConnect(long hostId, long poolId) throws StorageConflictException;
default boolean hostDisconnected(Host host, StoragePool pool) throws StorageConflictException {
return hostDisconnected(host.getId(), pool.getId());
}
boolean hostDisconnected(long hostId, long poolId);
boolean hostAboutToBeRemoved(long hostId);
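
A short usage sketch (not part of this diff) of why the new default methods are backward compatible: callers that hold the entities can pass them directly, while existing listeners that only implement the long-based callbacks keep working because the defaults delegate to them. The wrapper class and variable names are hypothetical; the interface, Host and StoragePool types come from the imports above:

    import com.cloud.exception.StorageConflictException;
    import com.cloud.host.Host;
    import com.cloud.storage.StoragePool;
    import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;

    class HostListenerUsageSketch {
        boolean connect(HypervisorHostListener listener, Host host, StoragePool pool) throws StorageConflictException {
            // New call path: pass the entities, which carry uuid/name for logging...
            boolean viaEntities = listener.hostConnect(host, pool);
            // ...and, unless a listener overrides the default, it falls back to the legacy id-based callback.
            boolean viaIds = listener.hostConnect(host.getId(), pool.getId());
            return viaEntities && viaIds;
        }
    }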

View File

@ -22,6 +22,8 @@ import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.UserData;
public interface TemplateInfo extends DownloadableDataInfo, VirtualMachineTemplate {
VirtualMachineTemplate getImage();
@Override
String getUniqueName();

View File

@ -19,6 +19,7 @@
package org.apache.cloudstack.engine.subsystem.api.storage;
import com.cloud.agent.api.to.DatadiskTO;
import com.cloud.template.VirtualMachineTemplate;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CommandResult;
@ -60,7 +61,7 @@ public interface TemplateService {
AsyncCallFuture<TemplateApiResult> deleteTemplateOnPrimary(TemplateInfo template, StoragePool pool);
void syncTemplateToRegionStore(long templateId, DataStore store);
void syncTemplateToRegionStore(VirtualMachineTemplate templateId, DataStore store);
void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId);

View File

@ -43,6 +43,10 @@ public interface Listener {
*/
boolean processAnswers(long agentId, long seq, Answer[] answers);
default boolean processAnswers(long agentId, String uuid, String name, long seq, Answer[] answers) {
return processAnswers(agentId, seq, answers);
}
/**
* This method is called by the AgentManager when an agent sent
* a command to the server. In order to process these commands,
@ -92,6 +96,10 @@ public interface Listener {
*/
boolean processDisconnect(long agentId, Status state);
default boolean processDisconnect(long agentId, String uuid, String name, Status state) {
return processDisconnect(agentId, state);
}
/**
* This method is called by AgentManager when a host is about to be removed from a cluster.
* @param long the ID of the host that's about to be removed
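
A minimal dispatcher-side sketch (not part of this diff) of how these overloads are meant to be used: the caller passes the richer identity, and any listener that never overrides the new signature is still reached through the default above. The package names for Listener and Status are assumed from the surrounding codebase, and the class and method names are hypothetical:

    import com.cloud.agent.Listener;
    import com.cloud.host.Status;

    class ListenerNotificationSketch {
        // Equivalent to the old processDisconnect(agentId, state) for legacy listeners,
        // but gives overriding listeners the uuid and name for their log statements.
        boolean notifyDisconnect(Listener listener, long agentId, String uuid, String name) {
            return listener.processDisconnect(agentId, uuid, name, Status.Disconnected);
        }
    }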

View File

@ -140,7 +140,7 @@ public interface CapacityManager {
* @param ram required RAM
* @param cpuOverprovisioningFactor factor to apply to the actual host cpu
*/
boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio,
boolean checkIfHostHasCapacity(Host host, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio,
boolean considerReservedCapacity);
void updateCapacityForHost(Host host);

View File

@ -238,7 +238,7 @@ public interface ConfigurationManager {
* @param domainId
* @return success/failure
*/
boolean releaseDomainSpecificVirtualRanges(long domainId);
boolean releaseDomainSpecificVirtualRanges(Domain domain);
/**
* Release dedicated virtual ip ranges of an account.
@ -246,7 +246,7 @@ public interface ConfigurationManager {
* @param accountId
* @return success/failure
*/
boolean releaseAccountSpecificVirtualRanges(long accountId);
boolean releaseAccountSpecificVirtualRanges(Account account);
/**
* Edits a pod in the database. Will not allow you to edit pods that are being used anywhere in the system.

View File

@ -19,6 +19,7 @@ package com.cloud.network;
import java.util.Date;
import java.util.List;
import com.cloud.user.User;
import org.apache.cloudstack.api.response.AcquirePodIpCmdResponse;
import org.apache.cloudstack.framework.config.ConfigKey;
@ -88,7 +89,7 @@ public interface IpAddressManager {
* @param caller
* @return true if it did; false if it didn't
*/
boolean disassociatePublicIpAddress(long id, long userId, Account caller);
boolean disassociatePublicIpAddress(IpAddress ipAddress, long userId, Account caller);
boolean applyRules(List<? extends FirewallRule> rules, FirewallRule.Purpose purpose, NetworkRuleApplier applier, boolean continueOnError)
throws ResourceUnavailableException;
@ -191,7 +192,7 @@ public interface IpAddressManager {
PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat)
throws ConcurrentOperationException, InsufficientAddressCapacityException;
IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, long callerId, DataCenter zone, Boolean displayIp, String ipaddress)
IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, User callerId, DataCenter zone, Boolean displayIp, String ipaddress)
throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException;
PublicIp assignPublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List<Long> vlanDbIds, Long networkId, String requestedIp, String requestedGateway, boolean isSystem)

View File

@ -62,7 +62,7 @@ public interface LoadBalancingRulesManager {
*/
boolean removeVmFromLoadBalancers(long vmId);
boolean applyLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException;
boolean applyLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException;
String getLBCapability(long networkid, String capabilityName);
@ -74,7 +74,7 @@ public interface LoadBalancingRulesManager {
boolean configureLbAutoScaleVmGroup(long vmGroupid, AutoScaleVmGroup.State currentState) throws ResourceUnavailableException;
boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException;
boolean revokeLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException;
boolean validateLbRule(LoadBalancingRule lbRule);

View File

@ -20,6 +20,8 @@ import java.util.List;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.IpAddress;
import com.cloud.network.Network;
import com.cloud.network.dao.IPAddressVO;
import com.cloud.network.firewall.FirewallService;
import com.cloud.network.rules.FirewallRule.FirewallRuleType;
@ -53,7 +55,7 @@ public interface FirewallManager extends FirewallService {
public void revokeRule(FirewallRuleVO rule, Account caller, long userId, boolean needUsageEvent);
boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException;
boolean revokeFirewallRulesForIp(IpAddress ip, long userId, Account caller) throws ResourceUnavailableException;
// /**
// * Revokes a firewall rule
@ -75,7 +77,7 @@ public interface FirewallManager extends FirewallService {
FirewallRule createRuleForAllCidrs(long ipAddrId, Account caller, Integer startPort, Integer endPort, String protocol, Integer icmpCode, Integer icmpType,
Long relatedRuleId, long networkId) throws NetworkRuleConflictException;
boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException;
boolean revokeAllFirewallRulesForNetwork(Network network, long userId, Account caller) throws ResourceUnavailableException;
boolean revokeFirewallRulesForVm(long vmId);

View File

@ -22,6 +22,7 @@ import com.cloud.exception.InsufficientAddressCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.IpAddress;
import com.cloud.network.Network;
import com.cloud.user.Account;
import com.cloud.uservm.UserVm;
import com.cloud.vm.Nic;
@ -47,7 +48,7 @@ public interface RulesManager extends RulesService {
FirewallRule[] reservePorts(IpAddress ip, String protocol, FirewallRule.Purpose purpose, boolean openFirewall, Account caller, int... ports)
throws NetworkRuleConflictException;
boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller);
boolean applyStaticNatsForNetwork(Network network, boolean continueOnError, Account caller);
void getSystemIpAndEnableStaticNatForVm(VirtualMachine vm, boolean getNewIp) throws InsufficientAddressCapacityException;
@ -60,7 +61,7 @@ public interface RulesManager extends RulesService {
* @param forRevoke
* @return
*/
boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, Account caller, boolean forRevoke);
boolean applyStaticNatForNetwork(Network network, boolean continueOnError, Account caller, boolean forRevoke);
List<FirewallRuleVO> listAssociatedRulesForGuestNic(Nic nic);

View File

@ -19,6 +19,7 @@ package com.cloud.network.security;
import java.util.HashMap;
import java.util.List;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
/**
@ -36,9 +37,9 @@ public interface SecurityGroupManager {
public SecurityGroupVO createDefaultSecurityGroup(Long accountId);
public boolean addInstanceToGroups(Long userVmId, List<Long> groups);
public boolean addInstanceToGroups(UserVm userVm, List<Long> groups);
public void removeInstanceFromGroups(long userVmId);
public void removeInstanceFromGroups(UserVm userVm);
public void fullSync(long agentId, HashMap<String, Pair<Long, Long>> newGroupStates);

View File

@ -20,6 +20,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import com.cloud.network.dao.IPAddressVO;
import com.cloud.utils.Pair;
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
@ -82,6 +83,8 @@ public interface VpcManager {
*/
void unassignIPFromVpcNetwork(long ipId, long networkId);
void unassignIPFromVpcNetwork(final IPAddressVO ip, final Network network);
/**
* Creates guest network in the VPC
*

View File

@ -185,7 +185,7 @@ public interface ResourceManager extends ResourceService, Configurable {
* @param vgpuType the VGPU type
* @return true when the host has the capacity with given VGPU type
*/
boolean isGPUDeviceAvailable(long hostId, String groupName, String vgpuType);
boolean isGPUDeviceAvailable(Host host, String groupName, String vgpuType);
/**
* Get available GPU device

View File

@ -365,9 +365,9 @@ public interface StorageManager extends StorageService {
String getStoragePoolMountFailureReason(String error);
boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException;
boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException;
void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException;
void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException;
void enableHost(long hostId) throws StorageUnavailableException, StorageConflictException;

View File

@ -120,7 +120,7 @@ public interface TemplateManager {
DataStore getImageStore(long tmpltId);
Long getTemplateSize(long templateId, long zoneId);
Long getTemplateSize(VirtualMachineTemplate template, long zoneId);
DataStore getImageStore(String storeUuid, Long zoneId, VolumeVO volume);
@ -143,7 +143,7 @@ public interface TemplateManager {
TemplateType validateTemplateType(BaseCmd cmd, boolean isAdmin, boolean isCrossZones);
List<DatadiskTO> getTemplateDisksOnImageStore(Long templateId, DataStoreRole role, String configurationId);
List<DatadiskTO> getTemplateDisksOnImageStore(VirtualMachineTemplate template, DataStoreRole role, String configurationId);
static Boolean getValidateUrlIsResolvableBeforeRegisteringTemplateValue() {
return ValidateUrlIsResolvableBeforeRegisteringTemplate.value();

View File

@ -26,9 +26,7 @@ import org.apache.cloudstack.jobs.JobInfo;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.cloud.serializer.GsonHelper;
import com.cloud.utils.Pair;
import com.google.gson.Gson;
/**
* VmWorkJobHandlerProxy can not be used as standalone due to run-time
@ -44,10 +42,8 @@ public class VmWorkJobHandlerProxy implements VmWorkJobHandler {
private Object _target;
private Map<Class<?>, Method> _handlerMethodMap = new HashMap<Class<?>, Method>();
private Gson _gsonLogger;
public VmWorkJobHandlerProxy(Object target) {
_gsonLogger = GsonHelper.getGsonLogger();
buildLookupMap(target.getClass());
_target = target;
@ -123,10 +119,10 @@ public class VmWorkJobHandlerProxy implements VmWorkJobHandler {
throw e;
}
} else {
logger.error("Unable to find handler for VM work job: " + work.getClass().getName() + _gsonLogger.toJson(work));
logger.error("Unable to find handler for VM work job: {} {}", work.getClass().getName(), work);
RuntimeException ex = new RuntimeException("Unable to find handler for VM work job: " + work.getClass().getName());
return new Pair<JobInfo.Status, String>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex));
return new Pair<>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex));
}
}
}

View File

@ -34,6 +34,7 @@ import java.util.concurrent.TimeUnit;
import com.cloud.agent.api.CleanupPersistentNetworkResourceCommand;
import org.apache.cloudstack.agent.lb.SetupMSListCommand;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
@ -111,6 +112,7 @@ public abstract class AgentAttache {
protected static String LOG_SEQ_FORMATTED_STRING;
protected final long _id;
protected String _uuid;
protected String _name = null;
protected final ConcurrentHashMap<Long, Listener> _waitForList;
protected final LinkedList<Request> _requests;
@ -133,8 +135,9 @@ public abstract class AgentAttache {
Arrays.sort(s_commandsNotAllowedInConnectingMode);
}
protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final boolean maintenance) {
protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final boolean maintenance) {
_id = id;
_uuid = uuid;
_name = name;
_waitForList = new ConcurrentHashMap<Long, Listener>();
_currentSequence = null;
@ -145,6 +148,13 @@ public abstract class AgentAttache {
LOG_SEQ_FORMATTED_STRING = String.format("Seq %d-{}: {}", _id);
}
@Override
public String toString() {
return String.format("AgentAttache %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "_id", "_uuid", "_name"));
}
public synchronized long getNextSequence() {
return ++_nextSequence;
}
@ -206,7 +216,7 @@ public abstract class AgentAttache {
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling.");
final Listener listener = _waitForList.remove(seq);
if (listener != null) {
listener.processDisconnect(_id, Status.Disconnected);
listener.processDisconnect(_id, _uuid, _name, Status.Disconnected);
}
int index = findRequest(seq);
if (index >= 0) {
@ -243,6 +253,10 @@ public abstract class AgentAttache {
return _id;
}
public String getUuid() {
return _uuid;
}
public String getName() {
return _name;
}
@ -316,7 +330,7 @@ public abstract class AgentAttache {
it.remove();
final Listener monitor = entry.getValue();
logger.debug(LOG_SEQ_FORMATTED_STRING, entry.getKey(), "Sending disconnect to " + monitor.getClass());
monitor.processDisconnect(_id, state);
monitor.processDisconnect(_id, _uuid, _name, state);
}
}
}

View File

@ -302,7 +302,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
}
logger.warn("No handling of agent control command: {} sent from {}", cmd, attache.getId());
logger.warn("No handling of agent control command: {} sent from {}", cmd, attache);
return new AgentControlAnswer(cmd);
}
@ -344,7 +344,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
answer = easySend(targetHostId, cmd);
} catch (final Exception e) {
String errorMsg = String.format("Error sending command %s to host %s, due to %s", cmd.getClass().getName(),
host.getUuid(), e.getLocalizedMessage());
host, e.getLocalizedMessage());
logger.error(errorMsg);
logger.debug(errorMsg, e);
}
@ -464,11 +464,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final Long hostId = agent.getId();
final HostVO host = _hostDao.findById(hostId);
if (host != null && host.getType() != null && !host.getType().isVirtual()) {
logger.debug("Checking if agent ({}) is alive", hostId);
logger.debug("Checking if agent ({}) is alive", host);
final Answer answer = easySend(hostId, new CheckHealthCommand());
if (answer != null && answer.getResult()) {
final Status status = Status.Up;
logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", hostId, status);
logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", host, status);
return status;
}
return _haMgr.investigate(hostId);
@ -493,7 +493,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public long send(final Long hostId, final Commands commands, final Listener listener) throws AgentUnavailableException {
final AgentAttache agent = getAttache(hostId);
if (agent.isClosed()) {
throw new AgentUnavailableException("Agent " + agent.getId() + " is closed", agent.getId());
throw new AgentUnavailableException(String.format(
"Agent [id: %d, name: %s] is closed",
agent.getId(), agent.getName()), agent.getId());
}
final Command[] cmds = checkForCommandsAndTag(commands);
@ -510,7 +512,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
return;
}
final long hostId = attache.getId();
logger.debug("Remove Agent : {}", hostId);
logger.debug("Remove Agent : {}", attache);
AgentAttache removed = null;
boolean conflict = false;
synchronized (_agents) {
@ -522,7 +524,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
}
if (conflict) {
logger.debug("Agent for host {} is created when it is being disconnected", hostId);
logger.debug("Agent for host {} is created when it is being disconnected", attache);
}
if (removed != null) {
removed.disconnect(nextState);
@ -530,7 +532,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
for (final Pair<Integer, Listener> monitor : _hostMonitors) {
logger.debug("Sending Disconnect to listener: {}", monitor.second().getClass().getName());
monitor.second().processDisconnect(hostId, nextState);
monitor.second().processDisconnect(hostId, attache.getUuid(), attache.getName(), nextState);
}
}
@ -555,28 +557,31 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (e instanceof ConnectionException) {
final ConnectionException ce = (ConnectionException)e;
if (ce.isSetupError()) {
logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage());
logger.warn("Monitor {} says there is an error in the connect process for {} due to {}",
monitor.second().getClass().getSimpleName(), host, e.getMessage());
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
throw ce;
} else {
logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage());
logger.info("Monitor {} says not to continue the connect process for {} due to {}",
monitor.second().getClass().getSimpleName(), host, e.getMessage());
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
return attache;
}
} else if (e instanceof HypervisorVersionChangedException) {
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e);
} else {
logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e);
logger.error("Monitor {} says there is an error in the connect process for {} due to {}",
monitor.second().getClass().getSimpleName(), host, e.getMessage(), e);
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e);
}
}
}
}
final Long dcId = host.getDataCenterId();
final ReadyCommand ready = new ReadyCommand(dcId, host.getId(), NumbersUtil.enableHumanReadableSizes);
final ReadyCommand ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes);
ready.setWait(ReadyCommandWait.value());
final Answer answer = easySend(hostId, ready);
if (answer == null || !answer.getResult()) {
@ -590,7 +595,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
Map<String, String> detailsMap = readyAnswer.getDetailsMap();
if (detailsMap != null) {
String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE);
logger.debug("Got HOST_UEFI_ENABLE [{}] for hostId [{}]:", uefiEnabled, host.getUuid());
logger.debug("Got HOST_UEFI_ENABLE [{}] for host [{}]:", uefiEnabled, host);
if (uefiEnabled != null) {
_hostDao.loadDetails(host);
if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) {
@ -707,14 +712,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
// load the respective discoverer
final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType());
if (discoverer == null) {
logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host.getId(), host.getHypervisorType());
logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host, host.getHypervisorType());
resource = loadResourcesWithoutHypervisor(host);
} else {
resource = discoverer.reloadResource(host);
}
if (resource == null) {
logger.warn("Unable to load the resource: {}", host.getId());
logger.warn("Unable to load the resource: {}", host);
return false;
}
@ -734,14 +739,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
return h == null ? false : true;
} else {
_executor.execute(new SimulateStartTask(host.getId(), resource, host.getDetails()));
_executor.execute(new SimulateStartTask(host.getId(), host.getUuid(), host.getName(), resource, host.getDetails()));
return true;
}
}
protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException {
logger.debug("create DirectAgentAttache for {}", host.getId());
final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates());
logger.debug("create DirectAgentAttache for {}", host);
final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), resource, host.isInMaintenanceStates());
AgentAttache old = null;
synchronized (_agents) {
@ -766,7 +771,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
for (final AgentAttache agent : _agents.values()) {
final HostVO host = _hostDao.findById(agent.getId());
if (host == null) {
logger.debug("Cant not find host {}", agent.getId());
logger.debug("Cannot find host {}", agent);
} else {
if (!agent.forForward()) {
agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId);
@ -784,17 +789,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final Status currentStatus = host.getStatus();
Status nextStatus;
if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) {
logger.debug("Host {} is already {}", host.getUuid(), currentStatus);
logger.debug("Host {} is already {}", host, currentStatus);
nextStatus = currentStatus;
} else {
try {
nextStatus = currentStatus.getNextStatus(event);
} catch (final NoTransitionException e) {
final String err = String.format("Cannot find next status for %s as current status is %s for agent %s", event, currentStatus, host.getUuid());
final String err = String.format("Cannot find next status for %s as current status is %s for agent %s", event, currentStatus, host);
logger.debug(err);
throw new CloudRuntimeException(err);
}
logger.debug("The next status of agent {} is {}, current status is {}", host.getUuid(), nextStatus, currentStatus);
logger.debug("The next status of agent {} is {}, current status is {}", host, nextStatus, currentStatus);
}
return nextStatus;
}
@ -806,17 +811,18 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
GlobalLock joinLock = getHostJoinLock(hostId);
if (joinLock.lock(60)) {
try {
logger.info("Host {} is disconnecting with event {}", hostId, event);
logger.info("Host {} is disconnecting with event {}",
attache, event);
Status nextStatus = null;
final HostVO host = _hostDao.findById(hostId);
if (host == null) {
logger.warn("Can't find host with {}", hostId);
logger.warn("Can't find host with {} ({})", hostId, attache);
nextStatus = Status.Removed;
} else {
nextStatus = getNextStatusOnDisconnection(host, event);
caService.purgeHostCertificate(host);
}
logger.debug("Deregistering link for {} with state {}", hostId, nextStatus);
logger.debug("Deregistering link for {} with state {}", attache, nextStatus);
removeAgent(attache, nextStatus);
@ -851,28 +857,30 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (nextStatus == Status.Alert) {
/* OK, we are going to the bad status, let's see what happened */
logger.info("Investigating why host {} has disconnected with event", hostId, event);
logger.info("Investigating why host {} has disconnected with event", host, event);
Status determinedState = investigate(attache);
// if state cannot be determined do nothing and bail out
if (determinedState == null) {
if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) {
logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", hostId, AlertWait, AlertWait.value());
logger.warn("Agent {} state cannot be determined for more than {} ({}) seconds, will go to Alert state",
host, AlertWait, AlertWait.value());
determinedState = Status.Alert;
} else {
logger.warn("Agent {} state cannot be determined, do nothing", hostId);
logger.warn("Agent {} state cannot be determined, do nothing", host);
return false;
}
}
final Status currentStatus = host.getStatus();
logger.info("The agent from host {} state determined is {}", hostId, determinedState);
logger.info("The agent from host {} state determined is {}", host, determinedState);
if (determinedState == Status.Down) {
final String message = "Host is down: " + host.getId() + "-" + host.getName() + ". Starting HA on the VMs";
final String message = String.format("Host %s is down. Starting HA on the VMs", host);
logger.error(message);
if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host down, " + host.getId(), message);
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(),
host.getPodId(), String.format("Host down, %s", host), message);
}
event = Status.Event.HostDown;
} else if (determinedState == Status.Up) {
@ -881,21 +889,20 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
agentStatusTransitTo(host, Status.Event.Ping, _nodeId);
return false;
} else if (determinedState == Status.Disconnected) {
logger.warn("Agent is disconnected but the host is still up: {}-{}", host.getId(), host.getName() +
'-' + host.getResourceState());
logger.warn("Agent is disconnected but the host is still up: {} state: {}", host, host.getResourceState());
if (currentStatus == Status.Disconnected ||
(currentStatus == Status.Up && host.getResourceState() == ResourceState.PrepareForMaintenance)) {
if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) {
logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host.getId());
logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host);
event = Status.Event.WaitedTooLong;
} else {
logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host.getId());
logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host);
return false;
}
} else if (currentStatus == Status.Up) {
final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
final HostPodVO podVO = _podDao.findById(host.getPodId());
final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
final String hostDesc = "name: " + host.getName() + " (id:" + host.getUuid() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host disconnected, " + hostDesc,
"If the agent for host [" + hostDesc + "] is not restarted within " + AlertWait + " seconds, host will go to Alert state");
@ -907,12 +914,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
final HostPodVO podVO = _podDao.findById(host.getPodId());
final String podName = podVO != null ? podVO.getName() : "NO POD";
final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podName;
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host in ALERT state, " + hostDesc,
"In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName());
final String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO, podName);
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST,
host.getDataCenterId(), host.getPodId(),
String.format("Host in ALERT state, %s", hostDesc),
String.format("In availability zone %s, host is in alert state: %s", dcVO, host));
}
} else {
logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host.getId());
logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host);
}
}
handleDisconnectWithoutInvestigation(attache, event, true, true);
@ -958,7 +967,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
final Status status = h.getStatus();
if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) {
logger.debug("Can not send command {} due to Host {} not being up", cmd, hostId);
logger.debug("Can not send command {} due to Host {} not being up", cmd, h);
return null;
}
final Answer answer = send(hostId, cmd);
@ -1004,21 +1013,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
if (host.getRemoved() != null) {
throw new CloudRuntimeException("Host has already been removed: " + hostId);
throw new CloudRuntimeException(String.format(
"Host has already been removed: %s", host));
}
if (host.getStatus() == Status.Disconnected) {
logger.debug("Host is already disconnected, no work to be done: {}", hostId);
logger.debug("Host is already disconnected, no work to be done: {}", host);
return;
}
if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) {
throw new CloudRuntimeException("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus());
throw new CloudRuntimeException(String.format(
"Unable to disconnect host because it is not in the correct state: host=%s; Status=%s",
host, host.getStatus()));
}
AgentAttache attache = findAttache(hostId);
if (attache == null) {
throw new CloudRuntimeException("Unable to disconnect host because it is not connected to this server: " + hostId);
throw new CloudRuntimeException(String.format(
"Unable to disconnect host because it is not connected to this server: %s",
host));
}
disconnectWithoutInvestigation(attache, Event.ShutdownRequested);
}
@ -1043,9 +1057,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException {
if (event == Event.AgentDisconnected) {
logger.debug("Received agent disconnect event for host {}", hostId);
AgentAttache attache = null;
attache = findAttache(hostId);
logger.debug("Received agent disconnect event for host {} ({})", hostId, attache);
if (attache != null) {
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
}
@ -1055,7 +1069,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
reconnect(hostId);
} catch (CloudRuntimeException e) {
logger.debug("Error on shutdown request for hostID: {}", hostId, e);
logger.debug("Error on shutdown request for hostID: {} ({})", hostId, findAttache(hostId), e);
return false;
}
return true;
@ -1070,8 +1084,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException {
logger.debug("create ConnectedAgentAttache for {}", host.getId());
final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates());
logger.debug("create ConnectedAgentAttache for {}", host);
final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates());
link.attach(attache);
AgentAttache old = null;
@ -1118,7 +1132,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
joinLock.unlock();
}
} else {
throw new ConnectionException(true, "Unable to acquire lock on host " + host.getUuid());
throw new ConnectionException(true,
String.format("Unable to acquire lock on host %s", host));
}
joinLock.releaseRef();
return attache;
@ -1131,7 +1146,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup);
if (host != null) {
checkHostArchOnCluster(host);
ready = new ReadyCommand(host.getDataCenterId(), host.getId(), NumbersUtil.enableHumanReadableSizes);
ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes);
attache = sendReadyAndGetAttache(host, ready, link, startup);
}
} catch (final Exception e) {
@ -1171,8 +1186,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
ServerResource resource;
Map<String, String> details;
long id;
String uuid;
String name;
public SimulateStartTask(final long id, final ServerResource resource, final Map<String, String> details) {
public SimulateStartTask(final long id, String uuid, String name, final ServerResource resource, final Map<String, String> details) {
this.id = id;
this.uuid = uuid;
this.name = name;
this.resource = resource;
this.details = details;
@ -1181,26 +1198,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
protected void runInContext() {
try {
logger.debug("Simulating start for resource {} id {}", resource.getName(), id);
logger.debug("Simulating start for resource {} (id: {}, uuid: {}, name {})", resource.getName(), id, uuid, name);
if (tapLoadingAgents(id, TapAgentsAction.Add)) {
try {
final AgentAttache agentattache = findAttache(id);
if (agentattache == null) {
logger.debug("Creating agent for host {}", id);
logger.debug("Creating agent for host [id: {}, uuid: {}, name: {}]", id, uuid, name);
_resourceMgr.createHostAndAgent(id, resource, details, false, null, false);
logger.debug("Completed creating agent for host {}", id);
logger.debug("Completed creating agent for host [id: {}, uuid: {}, name: {}", id, uuid, name);
} else {
logger.debug("Agent already created in another thread for host {}, ignore this", id);
logger.debug("Agent already created in another thread for host [id: {}, uuid: {}, name: {}], ignore this", id, uuid, name);
}
} finally {
tapLoadingAgents(id, TapAgentsAction.Del);
}
} else {
logger.debug("Agent creation already getting processed in another thread for host {}, ignore this", id);
logger.debug("Agent creation already getting processed in another thread for host [id: {}, uuid: {}, name: {}], ignore this", id, uuid, name);
}
} catch (final Exception e) {
logger.warn("Unable to simulate start on resource {} name {}", id, resource.getName(), e);
logger.warn("Unable to simulate start on resource [id: {}, uuid: {}, name: {}] name {}", id, uuid, name, resource.getName(), e);
}
}
}
@ -1240,7 +1257,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
cmd = cmds[i];
if (cmd instanceof StartupRoutingCommand || cmd instanceof StartupProxyCommand || cmd instanceof StartupSecondaryStorageCommand ||
cmd instanceof StartupStorageCommand) {
answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, mgmtServiceConf.getPingInterval());
answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, "", "", mgmtServiceConf.getPingInterval());
break;
}
}
@ -1270,7 +1287,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) {
logger.debug("{} is disabled for the cluster {}, cannot process the health check result " +
"received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName());
"received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host);
return;
}
@ -1280,10 +1297,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
logger.info("Host health check {}, auto {} KVM host: {}",
hostHealthCheckResult ? "succeeds" : "fails",
hostHealthCheckResult ? "enabling" : "disabling",
host.getName());
host);
_resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent);
} catch (NoTransitionException e) {
logger.error("Cannot Auto {} host: {}", resourceEvent, host.getName(), e);
logger.error("Cannot Auto {} host: {}", resourceEvent, host, e);
}
}
@ -1330,11 +1347,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (logger.isDebugEnabled()) {
if (cmd instanceof PingRoutingCommand) {
logD = false;
logger.debug("Ping from Routing host {}({})", hostId, hostName);
logger.debug("Ping from Routing host {}", attache);
logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request);
} else if (cmd instanceof PingCommand) {
logD = false;
logger.debug("Ping from {}({})", hostId, hostName);
logger.debug("Ping from {}", attache);
logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request);
} else {
logger.debug("SeqA {}-{}: {}", hostId, request.getSequence(), request);
@ -1349,20 +1366,20 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (cmd instanceof StartupRoutingCommand) {
final StartupRoutingCommand startup = (StartupRoutingCommand) cmd;
processStartupRoutingCommand(startup, hostId);
answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval());
answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval());
} else if (cmd instanceof StartupProxyCommand) {
final StartupProxyCommand startup = (StartupProxyCommand) cmd;
answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval());
answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval());
} else if (cmd instanceof StartupSecondaryStorageCommand) {
final StartupSecondaryStorageCommand startup = (StartupSecondaryStorageCommand) cmd;
answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval());
answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval());
} else if (cmd instanceof StartupStorageCommand) {
final StartupStorageCommand startup = (StartupStorageCommand) cmd;
answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval());
answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval());
} else if (cmd instanceof ShutdownCommand) {
final ShutdownCommand shutdown = (ShutdownCommand)cmd;
final String reason = shutdown.getReason();
logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache.getId(), reason, shutdown.getDetail());
logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache, reason, shutdown.getDetail());
if (reason.equals(ShutdownCommand.Update)) {
// disconnectWithoutInvestigation(attache, Event.UpdateNeeded);
throw new CloudRuntimeException("Agent update not implemented");
@ -1392,7 +1409,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
// gateway (cannot ping the default route)
final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
final HostPodVO podVO = _podDao.findById(host.getPodId());
final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
final String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO, podVO);
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc,
"Host [" + hostDesc + "] lost connection to gateway (default route) and is possibly having network connection issues.");
@ -1410,7 +1427,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
} else if (cmd instanceof ReadyAnswer) {
final HostVO host = _hostDao.findById(attache.getId());
if (host == null) {
logger.debug("Cant not find host {}", attache.getId());
logger.debug("Cant not find host with id: {} ({})", attache.getId(), attache);
}
answer = new Answer(cmd);
} else {
@ -1442,7 +1459,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (attache == null) {
logger.warn("Unable to process: {}", response);
} else if (!attache.processAnswers(response.getSequence(), response)) {
logger.info("Host {} - Seq {}: Response is not processed: {}", attache.getId(), response.getSequence(), response);
logger.info("Host {} - Seq {}: Response is not processed: {}", attache, response.getSequence(), response);
}
}
@ -1512,14 +1529,16 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) {
try {
_agentStatusLock.lock();
logger.debug("[Resource state = {}, Agent event = , Host id = {}, name = {}]", host.getResourceState(), e.toString(), host.getId(), host.getName());
logger.debug("[Resource state = {}, Agent event = , Host = {}]",
host.getResourceState(), e.toString(), host);
host.setManagementServerId(msId);
try {
return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao);
} catch (final NoTransitionException e1) {
logger.debug("Cannot transit agent status with event {} for host {}, name={}, management server id is {}", e, host.getId(), host.getName(), msId);
throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," + e1.getMessage());
logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId);
throw new CloudRuntimeException(String.format(
"Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage()));
}
} finally {
_agentStatusLock.unlock();
@ -1600,7 +1619,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
attache = createAttacheForDirectConnect(host, resource);
final StartupAnswer[] answers = new StartupAnswer[cmds.length];
for (int i = 0; i < answers.length; i++) {
answers[i] = new StartupAnswer(cmds[i], attache.getId(), mgmtServiceConf.getPingInterval());
answers[i] = new StartupAnswer(cmds[i], attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval());
}
attache.process(answers);
@ -1650,7 +1669,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public void pingBy(final long agentId) {
// Update PingMap with the latest time if agent entry exists in the PingMap
if (_pingMap.replace(agentId, InaccurateClock.getTimeInSeconds()) == null) {
logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap");
logger.info("PingMap for agent: {} ({}) will not be updated because agent is no longer in the PingMap", agentId, findAttache(agentId));
}
}
@ -1671,17 +1690,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
/*
* Host is in non-operation state, so no investigation and direct put agent to Disconnected
*/
logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", agentId, resourceState);
logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", h, resourceState);
disconnectWithoutInvestigation(agentId, Event.ShutdownRequested);
} else {
final HostVO host = _hostDao.findById(agentId);
if (host != null
&& (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) {
logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host.getId());
logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host);
disconnectWithoutInvestigation(agentId, Event.ShutdownRequested);
} else {
logger.debug("Ping timeout for agent {}, do investigation", agentId);
logger.debug("Ping timeout for agent {}, do investigation", h);
disconnectWithInvestigation(agentId, Event.PingTimeout);
}
}
@ -1844,7 +1863,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
Commands c = new Commands(cmds);
send(host.getId(), c, this);
} catch (AgentUnavailableException e) {
logger.debug("Failed to send host params on host: " + host.getId());
logger.debug("Failed to send host params on host: {}", host);
}
}
}
@ -1903,7 +1922,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
for (Long hostId : hostIds) {
Answer answer = easySend(hostId, cmds);
if (answer == null || !answer.getResult()) {
logger.error("Error sending parameters to agent {}", hostId);
logger.error("Error sending parameters to agent {} ({})", hostId, findAttache(hostId));
}
}
}

View File

@ -44,14 +44,14 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
s_clusteredAgentMgr = agentMgr;
}
public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name) {
super(agentMgr, id, name, null, false);
public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name) {
super(agentMgr, id, uuid, name, null, false);
_forward = true;
_transferRequests = new LinkedList<Request>();
}
public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final Link link, final boolean maintenance) {
super(agentMgr, id, name, link, maintenance);
public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final Link link, final boolean maintenance) {
super(agentMgr, id, uuid, name, link, maintenance);
_forward = link == null;
_transferRequests = new LinkedList<Request>();
}

View File

@ -216,10 +216,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
}
logger.debug("Loading directly connected host {}({})", host.getId(), host.getName());
logger.debug("Loading directly connected host {}", host);
loadDirectlyConnectedHost(host, false);
} catch (final Throwable e) {
logger.warn(" can not load directly connected host {}({}) due to ", host.getId(), host.getName(), e);
logger.warn(" can not load directly connected host {}({}) due to ",
host, e);
}
}
}
@ -243,10 +244,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return new ClusteredAgentHandler(type, link, data);
}
protected AgentAttache createAttache(final long id) {
logger.debug("create forwarding ClusteredAgentAttache for {}", id);
final HostVO host = _hostDao.findById(id);
final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName());
protected AgentAttache createAttache(final HostVO host) {
logger.debug("create forwarding ClusteredAgentAttache for {}", host);
long id = host.getId();
final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getUuid(), host.getName());
AgentAttache old = null;
synchronized (_agents) {
old = _agents.get(id);
@ -261,8 +262,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) {
logger.debug("create ClusteredAgentAttache for {}", host.getId());
final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates());
logger.debug("create ClusteredAgentAttache for {}", host);
final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates());
link.attach(attache);
AgentAttache old = null;
synchronized (_agents) {
@ -278,7 +279,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) {
logger.debug("Create ClusteredDirectAgentAttache for {}.", host);
final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates());
final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), _nodeId, resource, host.isInMaintenanceStates());
AgentAttache old = null;
synchronized (_agents) {
old = _agents.get(host.getId());
@ -321,15 +322,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException {
if (event == Event.AgentDisconnected) {
logger.debug("Received agent disconnect event for host {}", hostId);
final AgentAttache attache = findAttache(hostId);
logger.debug("Received agent disconnect event for host {} ({})", hostId, attache);
if (attache != null) {
// don't process disconnect if the host is being rebalanced
if (isAgentRebalanceEnabled()) {
final HostTransferMapVO transferVO = _hostTransferDao.findById(hostId);
if (transferVO != null) {
if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) {
logger.debug("Not processing {} event for the host id={} as the host is being connected to {}",Event.AgentDisconnected, hostId, _nodeId);
logger.debug(
"Not processing {} event for the host [id: {}, uuid: {}, name: {}] as the host is being connected to {}",
Event.AgentDisconnected, hostId, attache.getUuid(), attache.getName(), _nodeId);
return true;
}
}
@ -338,7 +341,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
// don't process disconnect if the disconnect came for the host via delayed cluster notification,
// but the host has already reconnected to the current management server
if (!attache.forForward()) {
logger.debug("Not processing {} event for the host id={} as the host is directly connected to the current management server {}", Event.AgentDisconnected, hostId, _nodeId);
logger.debug(
"Not processing {} event for the host [id: {}, uuid: {}, name: {}] as the host is directly connected to the current management server {}",
Event.AgentDisconnected, hostId, attache.getUuid(), attache.getName(), _nodeId);
return true;
}
@ -545,8 +550,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
AgentAttache agent = findAttache(hostId);
if (agent == null || !agent.forForward()) {
if (isHostOwnerSwitched(host)) {
logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", hostId);
agent = createAttache(hostId);
logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", host);
agent = createAttache(host);
}
}
if (agent == null) {
@ -712,12 +717,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public void onManagementNodeLeft(final List<? extends ManagementServerHost> nodeList, final long selfNodeId) {
for (final ManagementServerHost vo : nodeList) {
logger.info("Marking hosts as disconnected on Management server {}", vo.getMsid());
logger.info("Marking hosts as disconnected on Management server {}", vo);
final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout();
_hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing);
outOfBandManagementDao.expireServerOwnership(vo.getMsid());
haConfigDao.expireServerOwnership(vo.getMsid());
logger.info("Deleting entries from op_host_transfer table for Management server {}", vo.getMsid());
logger.info("Deleting entries from op_host_transfer table for Management server {}", vo);
cleanupTransferMap(vo.getMsid());
}
}
@ -744,7 +749,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
result = rebalanceHost(agentId, currentOwnerId, futureOwnerId);
} catch (final Exception e) {
logger.warn("Unable to rebalance host id={}", agentId, e);
logger.warn("Unable to rebalance host id={} ({})", agentId, findAttache(agentId), e);
}
}
return result;
@ -814,22 +819,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
List<HostVO> hostsToRebalance = new ArrayList<HostVO>();
for (final AgentLoadBalancerPlanner lbPlanner : _lbPlanners) {
hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad);
hostsToRebalance = lbPlanner.getHostsToRebalance(node, avLoad);
if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
break;
}
logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid());
logger.debug(
"Agent load balancer planner {} found no hosts to be rebalanced from management server {}",
lbPlanner.getName(), node);
}
if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node.getMsid());
logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node);
for (final HostVO host : hostsToRebalance) {
final long hostId = host.getId();
logger.debug("Asking management server {} to give away host id={}", node.getMsid(), hostId);
logger.debug("Asking management server {} to give away host id={}", node, host);
boolean result = true;
if (_hostTransferDao.findById(hostId) != null) {
logger.warn("Somebody else is already rebalancing host id: {}", hostId);
logger.warn("Somebody else is already rebalancing host: {}", host);
continue;
}
@ -838,11 +845,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId);
final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance);
if (answer == null) {
logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid());
logger.warn("Failed to get host {} from management server {}", host, node);
result = false;
}
} catch (final Exception ex) {
logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid(), ex);
logger.warn("Failed to get host {} from management server {}", host, node, ex);
result = false;
} finally {
if (transfer != null) {
@ -857,7 +864,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
}
} else {
logger.debug("Found no hosts to rebalance from the management server {}", node.getMsid());
logger.debug("Found no hosts to rebalance from the management server {}", node);
}
}
}
@ -902,7 +909,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return null;
}
logger.debug("Propagating agent change request event: {} to agent: {}", event.toString(), agentId);
logger.debug("Propagating agent change request event: {} to agent: {} ({})", event.toString(), agentId, findAttache(agentId));
final Command[] cmds = new Command[1];
cmds[0] = new ChangeAgentCommand(agentId, event);
@ -942,14 +949,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final HostTransferMapVO transferMap = _hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut));
if (transferMap == null) {
logger.debug("Timed out waiting for the host id={} to be ready to transfer, skipping rebalance for the host" + hostId);
logger.debug("Timed out waiting for the host id={} ({}) to be ready to transfer, skipping rebalance for the host", hostId, attache);
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
continue;
}
if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) {
logger.debug("Management server {} doesn't own host id={} any more, skipping rebalance for the host", _nodeId, hostId);
logger.debug(String.format("Management server %d doesn't own host id=%d (%s) any more, skipping rebalance for the host", _nodeId, hostId, attache));
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
continue;
@ -957,7 +964,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner());
if (ms != null && ms.getState() != ManagementServerHost.State.Up) {
logger.debug("Can't transfer host {} as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, ms);
logger.debug("Can't transfer host {} ({}) as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, attache, ms);
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
continue;
@ -968,13 +975,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
_executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner()));
} catch (final RejectedExecutionException ex) {
logger.warn("Failed to submit rebalance task for host id={}; postponing the execution", hostId);
logger.warn("Failed to submit rebalance task for host id={} ({}); postponing the execution", hostId, attache);
continue;
}
} else {
logger.debug("Agent {} can't be transferred yet as its request queue size is {} and listener queue size is {}",
hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize());
logger.debug("Agent {} ({}) can't be transferred yet as its request queue size is {} and listener queue size is {}",
hostId, attache, attache.getQueueSize(), attache.getNonRecurringListenersSize());
}
}
} else {
@ -990,7 +997,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) {
logger.debug("Adding agent {} to the list of agents to transfer", hostId);
logger.debug("Adding agent {} ({}) to the list of agents to transfer", hostId, findAttache(hostId));
synchronized (_agentToTransferIds) {
return _agentToTransferIds.add(hostId);
}
@ -1012,7 +1019,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
} catch (final Exception ex) {
logger.warn("Host {} failed to connect to the management server {} as a part of rebalance process", hostId, futureOwnerId, ex);
logger.warn("Host {} ({}) failed to connect to the management server {} as a part of rebalance process", hostId, findAttache(hostId), futureOwnerId, ex);
result = false;
}
@ -1027,7 +1034,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
} else if (futureOwnerId == _nodeId) {
final HostVO host = _hostDao.findById(hostId);
try {
logger.debug("Disconnecting host {}({}) as a part of rebalance process without notification", host.getId(), host.getName());
logger.debug("Disconnecting host {} as a part of rebalance process without notification", host);
final AgentAttache attache = findAttache(hostId);
if (attache != null) {
@ -1035,21 +1042,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
if (result) {
logger.debug("Loading directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId);
logger.debug("Loading directly connected host {} to the management server {} as a part of rebalance process", host, _nodeId);
result = loadDirectlyConnectedHost(host, true);
} else {
logger.warn("Failed to disconnect {}({}) as a part of rebalance process without notification" + host.getId(), host.getName());
logger.warn("Failed to disconnect {} as a part of rebalance process without notification", host);
}
} catch (final Exception ex) {
logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId, ex);
logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId, ex);
result = false;
}
if (result) {
logger.debug("Successfully loaded directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId);
logger.debug("Successfully loaded directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId);
} else {
logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId);
logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId);
}
}
@ -1059,9 +1066,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) {
final boolean success = event == Event.RebalanceCompleted ? true : false;
logger.debug("Finishing rebalancing for the agent {} with event {}", hostId, event);
final AgentAttache attache = findAttache(hostId);
logger.debug("Finishing rebalancing for the agent {} ({}) with event {}", hostId, attache, event);
if (attache == null || !(attache instanceof ClusteredAgentAttache)) {
logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId);
_hostTransferDao.completeAgentTransfer(hostId);
@ -1078,7 +1086,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
// 2) Get all transfer requests and route them to peer
Request requestToTransfer = forwardAttache.getRequestToTransfer();
while (requestToTransfer != null) {
logger.debug("Forwarding request {} held in transfer attache {} from the management server {} to {}", requestToTransfer.getSequence(), hostId, _nodeId, futureOwnerId);
logger.debug("Forwarding request {} held in transfer attache [id: {}, uuid: {}, name: {}] from the management server {} to {}",
requestToTransfer.getSequence(), hostId, attache.getUuid(), attache.getName(), _nodeId, futureOwnerId);
final boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes());
if (!routeResult) {
logD(requestToTransfer.getBytes(), "Failed to route request to peer");
@ -1087,23 +1096,25 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
requestToTransfer = forwardAttache.getRequestToTransfer();
}
logger.debug("Management server {} completed agent {} rebalance to {}", _nodeId, hostId, futureOwnerId);
logger.debug("Management server {} completed agent [id: {}, uuid: {}, name: {}] rebalance to {}",
_nodeId, hostId, attache.getUuid(), attache.getName(), futureOwnerId);
} else {
failRebalance(hostId);
}
logger.debug("Management server {} completed agent {} rebalance", _nodeId, hostId);
logger.debug("Management server {} completed agent [id: {}, uuid: {}, name: {}] rebalance", _nodeId, hostId, attache.getUuid(), attache.getName());
_hostTransferDao.completeAgentTransfer(hostId);
}
protected void failRebalance(final long hostId) {
AgentAttache attache = findAttache(hostId);
try {
logger.debug("Management server {} failed to rebalance agent {}", _nodeId, hostId);
logger.debug("Management server {} failed to rebalance agent {} ({})", _nodeId, hostId, attache);
_hostTransferDao.completeAgentTransfer(hostId);
handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true);
} catch (final Exception ex) {
logger.warn("Failed to reconnect host id={} as a part of failed rebalance task cleanup", hostId);
logger.warn("Failed to reconnect host id={} ({}) as a part of failed rebalance task cleanup", hostId, attache);
}
}
@ -1119,20 +1130,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId);
if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) {
handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true);
final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId);
final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(host);
if (forwardAttache == null) {
logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", hostId);
logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", host);
return false;
}
logger.debug("Putting agent id={} to transfer mode", hostId);
logger.debug("Putting agent {} to transfer mode", host);
forwardAttache.setTransferMode(true);
_agents.put(hostId, forwardAttache);
} else {
if (attache == null) {
logger.warn("Attache for the agent {} no longer exists on management server, can't start host rebalancing", hostId, _nodeId);
logger.warn("Attache for the agent {} no longer exists on management server, can't start host rebalancing", host, _nodeId);
} else {
logger.warn("Attache for the agent {} has request queue size= {} and listener queue size {}, can't start host rebalancing",
hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize());
logger.warn("Attache for the agent {} has request queue size {} and listener queue size {}, can't start host rebalancing",
host, attache.getQueueSize(), attache.getNonRecurringListenersSize());
}
return false;
}
@ -1167,11 +1178,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected void runInContext() {
AgentAttache attache = findAttache(hostId);
try {
logger.debug("Rebalancing host id={}", hostId);
logger.debug("Rebalancing host id={} ({})", hostId, attache);
rebalanceHost(hostId, currentOwnerId, futureOwnerId);
} catch (final Exception e) {
logger.warn("Unable to rebalance host id={}", hostId, e);
logger.warn("Unable to rebalance host id={} ({})", hostId, attache, e);
}
}
}
@ -1260,7 +1272,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
} else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) {
final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0];
logger.debug("Intercepting command to propagate event {} for host {}", cmd.getEvent().name(), cmd.getHostId());
logger.debug("Intercepting command to propagate event {} for host {} ({})", () -> cmd.getEvent().name(), cmd::getHostId, () -> _hostDao.findById(cmd.getHostId()));
boolean result = false;
try {

View File

@ -26,8 +26,8 @@ import com.cloud.utils.exception.CloudRuntimeException;
public class ClusteredDirectAgentAttache extends DirectAgentAttache implements Routable {
private final long _nodeId;
public ClusteredDirectAgentAttache(ClusteredAgentManagerImpl agentMgr, long id, String name, long mgmtId, ServerResource resource, boolean maintenance) {
super(agentMgr, id, name, resource, maintenance);
public ClusteredDirectAgentAttache(ClusteredAgentManagerImpl agentMgr, long id, String uuid, String name, long mgmtId, ServerResource resource, boolean maintenance) {
super(agentMgr, id, uuid, name, resource, maintenance);
_nodeId = mgmtId;
}
@ -37,9 +37,9 @@ public class ClusteredDirectAgentAttache extends DirectAgentAttache implements R
try {
req = Request.parse(data);
} catch (ClassNotFoundException e) {
throw new CloudRuntimeException("Unable to rout to an agent ", e);
throw new CloudRuntimeException("Unable to route to an agent ", e);
} catch (UnsupportedVersionException e) {
throw new CloudRuntimeException("Unable to rout to an agent ", e);
throw new CloudRuntimeException("Unable to route to an agent ", e);
}
if (req instanceof Response) {

View File

@ -31,8 +31,8 @@ public class ConnectedAgentAttache extends AgentAttache {
protected Link _link;
public ConnectedAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final Link link, final boolean maintenance) {
super(agentMgr, id, name, maintenance);
public ConnectedAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final Link link, final boolean maintenance) {
super(agentMgr, id, uuid, name, maintenance);
_link = link;
}

View File

@ -51,8 +51,8 @@ public class DirectAgentAttache extends AgentAttache {
AtomicInteger _outstandingTaskCount;
AtomicInteger _outstandingCronTaskCount;
public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, ServerResource resource, boolean maintenance) {
super(agentMgr, id, name, maintenance);
public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String uuid, String name, ServerResource resource, boolean maintenance) {
super(agentMgr, id, uuid, name, maintenance);
_resource = resource;
_outstandingTaskCount = new AtomicInteger(0);
_outstandingCronTaskCount = new AtomicInteger(0);
@ -60,7 +60,7 @@ public class DirectAgentAttache extends AgentAttache {
@Override
public void disconnect(Status state) {
logger.debug("Processing disconnect {}({})", _id, _name);
logger.debug("Processing disconnect [id: {}, uuid: {}, name: {}]", _id, _uuid, _name);
for (ScheduledFuture<?> future : _futures) {
future.cancel(false);
@ -115,7 +115,7 @@ public class DirectAgentAttache extends AgentAttache {
if (answers != null && answers[0] instanceof StartupAnswer) {
StartupAnswer startup = (StartupAnswer)answers[0];
int interval = startup.getPingInterval();
logger.info("StartupAnswer received {} Interval = {}", startup.getHostId(), interval);
logger.info("StartupAnswer received [id: {}, uuid: {}, name: {}, interval: {}]", startup.getHostId(), startup.getHostUuid(), startup.getHostName(), interval);
_futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS));
}
}
@ -126,7 +126,7 @@ public class DirectAgentAttache extends AgentAttache {
assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. Ever considered why there's a method called disconnect()?";
synchronized (this) {
if (_resource != null) {
logger.warn("Lost attache for {}({})", _id, _name);
logger.warn("Lost attache for [id: {}, uuid: {}, name: {}]", _id, _uuid, _name);
disconnect(Status.Alert);
}
}
@ -140,7 +140,8 @@ public class DirectAgentAttache extends AgentAttache {
}
private synchronized void scheduleFromQueue() {
logger.trace("Agent attache={}, task queue size={}, outstanding tasks={}", _id, tasks.size(), _outstandingTaskCount.get());
logger.trace("Agent attache [id: {}, uuid: {}, name: {}], task queue size={}, outstanding tasks={}",
_id, _uuid, _name, tasks.size(), _outstandingTaskCount.get());
while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) {
_outstandingTaskCount.incrementAndGet();
_agentMgr.getDirectAgentPool().execute(tasks.remove());
@ -152,7 +153,9 @@ public class DirectAgentAttache extends AgentAttache {
protected synchronized void runInContext() {
try {
if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) {
logger.warn("PingTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap());
logger.warn(
"PingTask execution for direct attache [id: {}, uuid: {}, name: {}] has reached maximum outstanding limit({}), bailing out",
_id, _uuid, _name, _agentMgr.getDirectAgentThreadCap());
return;
}
@ -167,21 +170,21 @@ public class DirectAgentAttache extends AgentAttache {
}
if (cmd == null) {
logger.warn("Unable to get current status on {}({})", _id, _name);
logger.warn("Unable to get current status on [id: {}, uuid: {}, name: {}]", _id, _uuid, _name);
return;
}
if (cmd.getContextParam("logid") != null) {
ThreadContext.put("logcontextid", cmd.getContextParam("logid"));
}
logger.debug("Ping from {}({})", _id, _name);
logger.debug("Ping from [id: {}, uuid: {}, name: {}]", _id, _uuid, _name);
long seq = _seq++;
logger.trace("SeqA {}-{}: {}", _id, seq, new Request(_id, -1, cmd, false).toString());
_agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd});
} else {
logger.debug("Unable to send ping because agent is disconnected {}", _id, _name);
logger.debug("Unable to send ping because agent is disconnected [id: {}, uuid: {}, name: {}]", _id, _uuid, _name);
}
} catch (Exception e) {
logger.warn("Unable to complete the ping task", e);
@ -219,7 +222,9 @@ public class DirectAgentAttache extends AgentAttache {
long seq = _req.getSequence();
try {
if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) {
logger.warn("CronTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap());
logger.warn(
"CronTask execution for direct attache [id: {}, uuid: {}, name: {}] has reached maximum outstanding limit({}), bailing out",
_id, _uuid, _name, _agentMgr.getDirectAgentThreadCap());
bailout();
return;
}

View File

@ -22,8 +22,8 @@ import com.cloud.host.Status;
public class DummyAttache extends AgentAttache {
public DummyAttache(AgentManagerImpl agentMgr, long id, String name, boolean maintenance) {
super(agentMgr, id, name, maintenance);
public DummyAttache(AgentManagerImpl agentMgr, long id, String uuid, String name, boolean maintenance) {
super(agentMgr, id, uuid, name, maintenance);
}
@Override

View File

@ -18,11 +18,12 @@ package com.cloud.cluster.agentlb;
import java.util.List;
import com.cloud.cluster.ManagementServerHostVO;
import com.cloud.host.HostVO;
import com.cloud.utils.component.Adapter;
public interface AgentLoadBalancerPlanner extends Adapter {
List<HostVO> getHostsToRebalance(long msId, int avLoad);
List<HostVO> getHostsToRebalance(ManagementServerHostVO ms, int avLoad);
}

View File

@ -26,6 +26,7 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.cluster.ManagementServerHostVO;
import org.springframework.stereotype.Component;
import com.cloud.host.Host;
@ -43,15 +44,17 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements
HostDao _hostDao = null;
@Override
public List<HostVO> getHostsToRebalance(long msId, int avLoad) {
public List<HostVO> getHostsToRebalance(ManagementServerHostVO ms, int avLoad) {
long msId = ms.getMsid();
QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
sc.and(sc.entity().getManagementServerId(), Op.EQ, msId);
List<HostVO> allHosts = sc.list();
if (allHosts.size() <= avLoad) {
logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad +
"; so it doesn't participate in agent rebalancing process");
logger.debug("Agent load = {} for management server {} doesn't exceed average " +
"system agent load = {}; so it doesn't participate in agent rebalancing process",
allHosts.size(), ms, avLoad);
return null;
}
@ -62,8 +65,9 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements
List<HostVO> directHosts = sc.list();
if (directHosts.isEmpty()) {
logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId +
"; so it doesn't participate in agent rebalancing process");
logger.debug("No direct agents in status {} exist for the management server " +
"{}; so it doesn't participate in agent rebalancing process",
Status.Up, ms);
return null;
}
@ -88,8 +92,9 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements
int hostsLeft = directHosts.size();
List<HostVO> hostsToReturn = new ArrayList<HostVO>();
logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() +
" and the average agent load in the system is " + avLoad + "; finalyzing list of hosts to give away...");
logger.debug("Management server {} can give away {} as it currently owns {} and the " +
"average agent load in the system is {}; finalyzing list of hosts to give away...",
ms, hostsToGive, allHosts.size(), avLoad);
for (Long cluster : hostToClusterMap.keySet()) {
List<HostVO> hostsInCluster = hostToClusterMap.get(cluster);
hostsLeft = hostsLeft - hostsInCluster.size();
@ -113,7 +118,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements
}
}
logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts");
logger.debug("Management server {} is ready to give away {} hosts", ms, hostsToReturn.size());
return hostsToReturn;
}

View File

@ -480,7 +480,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map<String, Map<Integer, String>> extraDhcpOptions, final Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap)
throws InsufficientCapacityException {
logger.info("allocating virtual machine from template:{} with hostname:{} and {} networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size());
logger.info("allocating virtual machine from template: {} with hostname: {} and {} networks", template, vmInstanceName, auxiliaryNetworks.size());
VMInstanceVO persistedVm = null;
try {
final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName);
@ -1196,8 +1196,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final Long clusterIdSpecified = planToDeploy.getClusterId();
if (clusterIdSpecified != null && rootVolClusterId != null) {
if (!rootVolClusterId.equals(clusterIdSpecified)) {
logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " +
rootVolClusterId + ", cluster specified: " + clusterIdSpecified);
logger.debug("Cannot satisfy the deployment plan passed in since " +
"the ready Root volume is in different cluster. volume's cluster: {}, cluster specified: {}",
() -> _clusterDao.findById(rootVolClusterId), () -> _clusterDao.findById(clusterIdSpecified));
throw new ResourceUnavailableException(
"Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " +
vm, Cluster.class, clusterIdSpecified);
@ -1320,8 +1321,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
syncDiskChainChange(startAnswer);
if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
logger.error("Unable to transition to a new state. VM uuid: {}, VM oldstate: {}, Event: {}", vm.getUuid(), vm.getState(), Event.OperationSucceeded);
throw new ConcurrentOperationException("Failed to deploy VM"+ vm.getUuid());
logger.error("Unable to transition to a new state. VM uuid: {}, VM oldstate: {}, Event: {}", vm, vm.getState(), Event.OperationSucceeded);
throw new ConcurrentOperationException(String.format("Failed to deploy VM %s", vm));
}
final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
@ -1348,10 +1349,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
return;
} catch (final Exception e) {
logger.error("Retrying after catching exception while trying to secure agent for systemvm id={}", vm.getId(), e);
logger.error("Retrying after catching exception while trying to secure agent for systemvm {}", vm, e);
}
}
throw new CloudRuntimeException("Failed to setup and secure agent for systemvm id=" + vm.getId());
throw new CloudRuntimeException(String.format("Failed to setup and secure agent for systemvm %s", vm));
}
return;
} else {
@ -1390,7 +1391,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
} catch (OperationTimedoutException e) {
logger.debug("Unable to send the start command to host {} failed to start VM: {}", dest.getHost(), vm.getUuid());
logger.debug("Unable to send the start command to host {} failed to start VM: {}", dest.getHost(), vm);
if (e.isActive()) {
_haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
}
@ -1745,7 +1746,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs(VirtualMachine.Type.Instance, vm.getId());
if (CollectionUtils.isNotEmpty(pendingWorkJobs) || _haMgr.hasPendingHaWork(vm.getId())) {
String msg = "There are pending jobs or HA tasks working on the VM with id: " + vm.getId() + ", can't unmanage the VM.";
String msg = String.format("There are pending jobs or HA tasks working on the VM: %s, can't unmanage the VM.", vm);
logger.info(msg);
throw new ConcurrentOperationException(msg);
}
@ -2124,8 +2125,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
} else {
HostVO host = _hostDao.findById(hostId);
if (!cleanUpEvenIfUnableToStop && vm.getState() == State.Running && host.getResourceState() == ResourceState.PrepareForMaintenance) {
logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: {} is not allowed", vm.getId());
throw new CloudRuntimeException("Stop VM operation on the VM id: " + vm.getId() + " is not allowed as host is preparing for maintenance mode");
logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM: {} is not allowed", vm);
throw new CloudRuntimeException(String.format("Stop VM operation on the VM %s is not allowed as host is preparing for maintenance mode", vm));
}
}
@ -2509,7 +2510,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
List<VolumeVO> volumes = _volsDao.findUsableVolumesForInstance(vm.getId());
logger.debug("Found {} volumes for VM {}(uuid:{}, id:{})", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId());
for (VolumeObjectTO result : results ) {
logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getId(), result.getPath(), result.getDataStoreUuid());
logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getUuid(), result.getPath(), result.getDataStoreUuid());
VolumeVO volume = _volsDao.findById(result.getId());
StoragePool pool = _storagePoolDao.findPoolByUUID(result.getDataStoreUuid());
if (volume == null || pool == null) {
@ -2660,14 +2661,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
private void removeStaleVmFromSource(VMInstanceVO vm, HostVO srcHost) {
logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: {} from source host: {}",
vm.getInstanceName(), srcHost.getId());
vm, srcHost);
final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName());
uvc.setCleanupVmFiles(true);
try {
_agentMgr.send(srcHost.getId(), uvc);
} catch (AgentUnavailableException | OperationTimedoutException e) {
throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() +
" after successfully migrating VM's storage across VMware Datacenters", e);
throw new CloudRuntimeException(String.format(
"Failed to unregister VM: %s from source host: %s after successfully migrating VM's storage across VMware Datacenters",
vm, srcHost), e);
}
}
@ -2722,10 +2724,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
for (final VolumeVO volume : volumes) {
if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) {
logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: {}",
dest.getHost().getId());
throw new CloudRuntimeException(
"Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: "
+ dest.getHost().getId());
dest.getHost());
throw new CloudRuntimeException(String.format(
"Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: %s",
dest.getHost()));
}
}
}
@ -2852,13 +2854,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
try {
_agentMgr.send(srcHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null);
} catch (final AgentUnavailableException e) {
logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e);
logger.error("AgentUnavailableException while cleanup on source host: {}", fromHost, e);
}
cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
throw new CloudRuntimeException("Unable to complete migration for " + vm);
}
} catch (final OperationTimedoutException e) {
logger.warn("Error while checking the vm {} on host {}", vm, dstHostId, e);
logger.warn("Error while checking the vm {} on host {}", vm, dest.getHost(), e);
}
migrated = true;
} finally {
@ -3302,7 +3304,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
try {
_agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
} catch (final AgentUnavailableException e) {
logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e);
logger.error("AgentUnavailableException while cleanup on source host: {}", srcHost, e);
}
cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
throw new CloudRuntimeException("VM not found on destination host. Unable to complete migration for " + vm);
@ -3834,9 +3836,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
return;
}
logger.debug("Received startup command from hypervisor host. host id: {}", agent.getId());
logger.debug("Received startup command from hypervisor host. host: {}", agent);
_syncMgr.resetHostSyncState(agent.getId());
_syncMgr.resetHostSyncState(agent);
if (forRebalance) {
logger.debug("Not processing listener {} as connect happens on rebalance process", this);
@ -3851,7 +3853,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final long seq_no = _agentMgr.send(agentId, new Commands(syncVMMetaDataCmd), this);
logger.debug("Cluster VM metadata sync started with jobid {}", seq_no);
} catch (final AgentUnavailableException e) {
logger.fatal("The Cluster VM metadata sync process failed for cluster id {} with {}", clusterId, e);
logger.fatal("The Cluster VM metadata sync process failed for cluster {} with {}", _clusterDao.findById(clusterId), e);
}
}
}
@ -4224,10 +4226,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
logger.debug("Not need to remove the vm {} from network {} as the vm doesn't have nic in this network.", vm, network);
return true;
}
throw new ConcurrentOperationException("Unable to lock nic " + nic.getId());
throw new ConcurrentOperationException(String.format("Unable to lock nic %s", nic));
}
logger.debug("Lock is acquired for nic id {} as a part of remove vm {} from network {}", lock.getId(), vm, network);
logger.debug("Lock is acquired for nic {} as a part of remove vm {} from network {}", lock, vm, network);
try {
final NicProfile nicProfile =
@ -4256,7 +4258,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
return true;
} finally {
_nicsDao.releaseFromLockTable(lock.getId());
logger.debug("Lock is released for nic id {} as a part of remove vm {} from network {}", lock.getId(), vm, network);
logger.debug("Lock is released for nic {} as a part of remove vm {} from network {}", lock, vm, network);
}
}
@ -4348,9 +4350,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
vm.getServiceOfferingId();
final long dstHostId = dest.getHost().getId();
final Host fromHost = _hostDao.findById(srcHostId);
Host srcHost = _hostDao.findById(srcHostId);
if (fromHost == null) {
String logMessageUnableToFindHost = String.format("Unable to find host to migrate from %s.", srcHost);
String logMessageUnableToFindHost = String.format("Unable to find host to migrate from %s.", srcHostId);
logger.info(logMessageUnableToFindHost);
throw new CloudRuntimeException(logMessageUnableToFindHost);
}
@ -4359,7 +4360,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
long destHostClusterId = dest.getCluster().getId();
long fromHostClusterId = fromHost.getClusterId();
if (fromHostClusterId != destHostClusterId) {
String logMessageHostsOnDifferentCluster = String.format("Source and destination host are not in same cluster, unable to migrate to %s", srcHost);
String logMessageHostsOnDifferentCluster = String.format("Source and destination host are not in same cluster, unable to migrate to %s", fromHost);
logger.info(logMessageHostsOnDifferentCluster);
throw new CloudRuntimeException(logMessageHostsOnDifferentCluster);
}
@ -4406,7 +4407,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
if (pfma == null || !pfma.getResult()) {
final String details = pfma != null ? pfma.getDetails() : "null answer returned";
pfma = null;
throw new AgentUnavailableException(String.format("Unable to prepare for migration to destination host [%s] due to [%s].", dstHostId, details), dstHostId);
throw new AgentUnavailableException(String.format("Unable to prepare for migration to destination host [%s] due to [%s].", dest.getHost(), details), dstHostId);
}
} catch (final OperationTimedoutException e1) {
throw new AgentUnavailableException("Operation timed out", dstHostId);
@ -4466,7 +4467,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
try {
_agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
} catch (final AgentUnavailableException e) {
logger.error("Unable to cleanup source host [{}] due to [{}].", srcHostId, e.getMessage(), e);
logger.error("Unable to cleanup source host [{}] due to [{}].", fromHost, e.getMessage(), e);
}
cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
throw new CloudRuntimeException("Unable to complete migration for " + vm);
@ -4801,7 +4802,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
logger.warn("VM {} no longer exists when processing VM state report.", vmId);
}
} else {
logger.info("There is pending job or HA tasks working on the VM. vm id: {}, postpone power-change report by resetting power-change counters.", vmId );
logger.info("There is pending job or HA tasks working on the VM. vm: {}, postpone power-change report by resetting power-change counters.", () -> _vmDao.findById(vmId));
_vmDao.resetVmPowerStateTracking(vmId);
}
}
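The lambda above keeps the database lookup lazy: log4j2's Supplier overloads only invoke it when INFO is enabled for the logger. A standalone sketch with the DAO lookup simulated (names are illustrative):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LazyLookupLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLookupLoggingSketch.class);

    // Stands in for _vmDao.findById(vmId); only runs when the log level is enabled.
    private static String findVm(long vmId) {
        return "VM-" + vmId;
    }

    public static void main(String[] args) {
        long vmId = 42L;
        logger.info("There is pending job or HA tasks working on the VM. vm: {}, postpone power-change report by resetting power-change counters.",
                () -> findVm(vmId));
    }
}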
@ -4842,7 +4843,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
case Running:
try {
if (vm.getHostId() != null && !vm.getHostId().equals(vm.getPowerHostId())) {
logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId());
logger.info("Detected out of band VM migration from host {} to host {}", () -> _hostDao.findById(vm.getHostId()), () -> _hostDao.findById(vm.getPowerHostId()));
}
stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId());
} catch (final NoTransitionException e) {
@ -4871,22 +4872,22 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
case Destroyed:
case Expunging:
logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm.getId(), vm.getState());
logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm, vm.getState());
break;
case Migrating:
logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState());
logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm, vm.getState());
try {
stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId());
} catch (final NoTransitionException e) {
logger.warn("Unexpected VM state transition exception, race-condition?", e);
}
logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName());
logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm);
break;
case Error:
default:
logger.info("Receive power on report when VM is in error or unexpected state. vm: {}, state: {}.", vm.getId(), vm.getState());
logger.info("Receive power on report when VM is in error or unexpected state. vm: {}, state: {}.", vm, vm.getState());
break;
}
}
@ -4901,16 +4902,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), getApiCommandResourceTypeForVm(vm).toString());
case Migrating:
logger.info("VM {} is at {} and we received a {} report while there is no pending jobs on it"
, vm.getInstanceName(), vm.getState(), vm.getPowerState());
, vm, vm.getState(), vm.getPowerState());
if((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && vm.getState() == State.Running
&& HaVmRestartHostUp.value()
&& vm.getHypervisorType() != HypervisorType.VMware
&& vm.getHypervisorType() != HypervisorType.Hyperv) {
logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm.getInstanceName());
logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm);
if (!_haMgr.hasPendingHaWork(vm.getId())) {
_haMgr.scheduleRestart(vm, true);
} else {
logger.info("VM {} already has a pending HA task working on it.", vm.getInstanceName());
logger.info("VM {} already has a pending HA task working on it.", vm);
}
return;
}
@ -4937,10 +4938,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(),
VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState()
+ " -> Stopped) from out-of-context transition.");
VM_SYNC_ALERT_SUBJECT, String.format("VM %s(%s) state is sync-ed (%s -> Stopped) from out-of-context transition.",
vm.getHostName(), vm, vm.getState()));
logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm.getInstanceName());
logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm);
break;
@ -4983,8 +4984,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final VMInstanceVO vm = _vmDao.findById(vmId);
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(),
VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") is stuck in " + vm.getState()
+ " state and its host is unreachable for too long");
VM_SYNC_ALERT_SUBJECT, String.format("VM %s(%s) is stuck in %s state and its host is unreachable for too long",
vm.getHostName(), vm, vm.getState()));
}
}
@ -5502,7 +5503,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
try {
orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null);
} catch (final InsufficientServerCapacityException e) {
logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm.getId(), e);
logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm, e);
orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner());
}
@ -5794,18 +5795,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
@Override
public Pair<Long, Long> findClusterAndHostIdForVm(VirtualMachine vm, boolean skipCurrentHostForStartingVm) {
Long hostId = null;
Host host = null;
if (!skipCurrentHostForStartingVm || !State.Starting.equals(vm.getState())) {
hostId = vm.getHostId();
}
Long clusterId = null;
if(hostId == null) {
hostId = vm.getLastHostId();
logger.debug("host id is null, using last host id {}", hostId);
}
if (hostId == null) {
return findClusterAndHostIdForVmFromVolumes(vm.getId());
if (vm.getLastHostId() == null) {
return findClusterAndHostIdForVmFromVolumes(vm.getId());
}
hostId = vm.getLastHostId();
host = _hostDao.findById(hostId);
logger.debug("host id is null, using last host {} with id {}", host, hostId);
}
HostVO host = _hostDao.findById(hostId);
host = host == null ? _hostDao.findById(hostId) : host;
if (host != null) {
clusterId = host.getClusterId();
}

View File

@ -19,15 +19,14 @@ package com.cloud.vm;
import java.util.Map;
import com.cloud.agent.api.HostVmStateReportEntry;
import com.cloud.host.Host;
public interface VirtualMachinePowerStateSync {
void resetHostSyncState(long hostId);
void resetHostSyncState(Host hostId);
void processHostVmStateReport(long hostId, Map<String, HostVmStateReportEntry> report);
// to adapt legacy ping report
void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report, boolean force);
Map<Long, VirtualMachine.PowerState> convertVmStateReport(Map<String, HostVmStateReportEntry> states);
}
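Callers that previously passed a bare host id now resolve the Host first; a minimal sketch of the adaptation (the hostDao and powerStateSync names here are assumptions for illustration):

// Illustrative only: resolve the HostVO once so downstream log lines can
// print the host's name/uuid instead of just its numeric id.
Host host = hostDao.findById(hostId);
if (host != null) {
    powerStateSync.resetHostSyncState(host);
}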

View File

@ -24,6 +24,10 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.utils.Pair;
import org.apache.cloudstack.framework.messagebus.MessageBus;
import org.apache.cloudstack.framework.messagebus.PublishScope;
import org.apache.logging.log4j.Logger;
@ -40,54 +44,57 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
@Inject MessageBus _messageBus;
@Inject VMInstanceDao _instanceDao;
@Inject HostDao hostDao;
@Inject ManagementServiceConfiguration mgmtServiceConf;
public VirtualMachinePowerStateSyncImpl() {
}
@Override
public void resetHostSyncState(long hostId) {
logger.info("Reset VM power state sync for host: {}.", hostId);
_instanceDao.resetHostPowerStateTracking(hostId);
public void resetHostSyncState(Host host) {
logger.info("Reset VM power state sync for host: {}", host);
_instanceDao.resetHostPowerStateTracking(host.getId());
}
@Override
public void processHostVmStateReport(long hostId, Map<String, HostVmStateReportEntry> report) {
logger.debug("Process host VM state report. host: {}.", hostId);
HostVO host = hostDao.findById(hostId);
logger.debug("Process host VM state report. host: {}", host);
Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
processReport(hostId, translatedInfo, false);
Map<Long, Pair<VirtualMachine.PowerState, VMInstanceVO>> translatedInfo = convertVmStateReport(report);
processReport(host, translatedInfo, false);
}
@Override
public void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report, boolean force) {
logger.debug("Process host VM state report from ping process. host: {}.", hostId);
HostVO host = hostDao.findById(hostId);
logger.debug("Process host VM state report from ping process. host: {}", host);
Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
processReport(hostId, translatedInfo, force);
Map<Long, Pair<VirtualMachine.PowerState, VMInstanceVO>> translatedInfo = convertVmStateReport(report);
processReport(host, translatedInfo, force);
}
private void processReport(long hostId, Map<Long, VirtualMachine.PowerState> translatedInfo, boolean force) {
private void processReport(HostVO host, Map<Long, Pair<VirtualMachine.PowerState, VMInstanceVO>> translatedInfo, boolean force) {
logger.debug("Process VM state report. host: {}, number of records in report: {}.", hostId, translatedInfo.size());
logger.debug("Process VM state report. host: {}, number of records in report: {}.", host, translatedInfo.size());
for (Map.Entry<Long, VirtualMachine.PowerState> entry : translatedInfo.entrySet()) {
for (Map.Entry<Long, Pair<VirtualMachine.PowerState, VMInstanceVO>> entry : translatedInfo.entrySet()) {
logger.debug("VM state report. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue());
logger.debug("VM state report. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first());
if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) {
logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue());
if (_instanceDao.updatePowerState(entry.getKey(), host.getId(), entry.getValue().first(), DateUtil.currentGMTTime())) {
logger.debug("VM state report is updated. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first());
_messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey());
} else {
logger.trace("VM power state does not change, skip DB writing. vm id: {}.", entry.getKey());
logger.trace("VM power state does not change, skip DB writing. vm: {}", entry.getValue().second());
}
}
// any state outdates should be checked against the time before this list was retrieved
Date startTime = DateUtil.currentGMTTime();
// for all running/stopping VMs, we provide monitoring of missing report
List<VMInstanceVO> vmsThatAreMissingReport = _instanceDao.findByHostInStates(hostId, VirtualMachine.State.Running,
List<VMInstanceVO> vmsThatAreMissingReport = _instanceDao.findByHostInStates(host.getId(), VirtualMachine.State.Running,
VirtualMachine.State.Stopping, VirtualMachine.State.Starting);
java.util.Iterator<VMInstanceVO> it = vmsThatAreMissingReport.iterator();
while (it.hasNext()) {
@ -99,7 +106,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
// here we need to be wary of out of band migration as opposed to other, more unexpected state changes
if (vmsThatAreMissingReport.size() > 0) {
Date currentTime = DateUtil.currentGMTTime();
logger.debug("Run missing VM report. current time: {}", currentTime.getTime());
logger.debug("Run missing VM report for host {}. current time: {}", host, currentTime.getTime());
// 2 times of sync-update interval for graceful period
long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L;
@ -109,60 +116,55 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
// Make sure powerState is up to date for missing VMs
try {
if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) {
logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: {}.", instance.getId());
logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM: {}", instance);
_instanceDao.resetVmPowerStateTracking(instance.getId());
continue;
}
} catch (CloudRuntimeException e) {
logger.warn("Checked for missing powerstate of a none existing vm", e);
logger.warn("Checked for missing powerstate of a none existing vm {}", instance, e);
continue;
}
Date vmStateUpdateTime = instance.getPowerStateUpdateTime();
if (vmStateUpdateTime == null) {
logger.warn("VM power state update time is null, falling back to update time for vm id: {}.", instance.getId());
logger.warn("VM power state update time is null, falling back to update time for vm: {}", instance);
vmStateUpdateTime = instance.getUpdateTime();
if (vmStateUpdateTime == null) {
logger.warn("VM update time is null, falling back to creation time for vm id: {}", instance.getId());
logger.warn("VM update time is null, falling back to creation time for vm: {}", instance);
vmStateUpdateTime = instance.getCreated();
}
}
String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime);
logger.debug("Detected missing VM. host: {}, vm id: {}({}), power state: {}, last state update: {}"
, hostId
, instance.getId()
, instance.getUuid()
, VirtualMachine.PowerState.PowerReportMissing
, lastTime);
logger.debug("Detected missing VM. host: {}, vm: {}, power state: {}, last state update: {}",
host, instance, VirtualMachine.PowerState.PowerReportMissing, lastTime);
long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime();
if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) {
logger.debug("vm id: {} - time since last state update({}ms) has passed graceful period.", instance.getId(), milliSecondsSinceLastStateUpdate);
logger.debug("vm: {} - time since last state update({}ms) has passed graceful period", instance, milliSecondsSinceLastStateUpdate);
// this is where a race condition might have happened if we don't re-fetch the instance;
// between the start time of this job and the currentTime of this missing-branch
// an update might have occurred that we should not override in case of out of band migration
if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) {
logger.debug("VM state report is updated. host: {}, vm id: {}, power state: PowerReportMissing.", hostId, instance.getId());
if (_instanceDao.updatePowerState(instance.getId(), host.getId(), VirtualMachine.PowerState.PowerReportMissing, startTime)) {
logger.debug("VM state report is updated. host: {}, vm: {}, power state: PowerReportMissing ", host, instance);
_messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId());
} else {
logger.debug("VM power state does not change, skip DB writing. vm id: {}", instance.getId());
logger.debug("VM power state does not change, skip DB writing. vm: {}", instance);
}
} else {
logger.debug("vm id: {} - time since last state update({}ms) has not passed graceful period yet.", instance.getId(), milliSecondsSinceLastStateUpdate);
logger.debug("vm: {} - time since last state update({} ms) has not passed graceful period yet", instance, milliSecondsSinceLastStateUpdate);
}
}
}
logger.debug("Done with process of VM state report. host: {}", hostId);
logger.debug("Done with process of VM state report. host: {}", host);
}
@Override
public Map<Long, VirtualMachine.PowerState> convertVmStateReport(Map<String, HostVmStateReportEntry> states) {
final HashMap<Long, VirtualMachine.PowerState> map = new HashMap<Long, VirtualMachine.PowerState>();
public Map<Long, Pair<VirtualMachine.PowerState, VMInstanceVO>> convertVmStateReport(Map<String, HostVmStateReportEntry> states) {
final HashMap<Long, Pair<VirtualMachine.PowerState, VMInstanceVO>> map = new HashMap<>();
if (states == null) {
return map;
}
@ -170,9 +172,9 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
for (Map.Entry<String, HostVmStateReportEntry> entry : states.entrySet()) {
VMInstanceVO vm = findVM(entry.getKey());
if (vm != null) {
map.put(vm.getId(), entry.getValue().getState());
map.put(vm.getId(), new Pair<>(entry.getValue().getState(), vm));
} else {
logger.debug("Unable to find matched VM in CloudStack DB. name: {}", entry.getKey());
logger.debug("Unable to find matched VM in CloudStack DB. name: {} powerstate: {}", entry.getKey(), entry.getValue());
}
}
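A minimal sketch of how a caller inside the codebase might consume the Pair-valued report (method and variable names are illustrative):

// Illustrative only: the Pair carries the full VO, so the log line can show
// uuid/name instead of a bare database id.
private void logReportedStates(Map<Long, Pair<VirtualMachine.PowerState, VMInstanceVO>> report) {
    for (Map.Entry<Long, Pair<VirtualMachine.PowerState, VMInstanceVO>> entry : report.entrySet()) {
        VMInstanceVO vm = entry.getValue().second();
        VirtualMachine.PowerState powerState = entry.getValue().first();
        logger.debug("VM {} reported power state {}", vm, powerState);
    }
}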

View File

@ -67,8 +67,8 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi
try {
List<AsyncJobJoinMapVO> joinRecords = _joinMapDao.listJoinRecords(job.getId());
if (joinRecords.size() != 1) {
logger.warn("AsyncJob-" + job.getId()
+ " received wakeup call with un-supported joining job number: " + joinRecords.size());
logger.warn("AsyncJob-{} ({}) received wakeup call with un-supported " +
"joining job number: {}", job.getId(), job, joinRecords.size());
// if we fail wakeup-execution for any reason, avoid release sync-source if there is any
job.setSyncSource(null);
@ -82,7 +82,7 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi
try {
workClz = Class.forName(job.getCmd());
} catch (ClassNotFoundException e) {
logger.error("VM work class " + job.getCmd() + " is not found", e);
logger.error("VM work class {} for job {} is not found", job.getCmd(), job, e);
return;
}
@ -103,14 +103,13 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi
handler.invoke(_vmMgr);
} else {
assert (false);
logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() +
" when waking up job-" + job.getId());
logger.error("Unable to find wakeup handler {} when waking up job-{} ({})", joinRecord.getWakeupHandler(), job.getId(), job);
}
} finally {
CallContext.unregister();
}
} catch (Throwable e) {
logger.warn("Unexpected exception in waking up job-" + job.getId());
logger.warn("Unexpected exception in waking up job-{} ({})", job.getId(), job);
// if we fail wakeup-execution for any reason, avoid release sync-source if there is any
job.setSyncSource(null);

View File

@ -96,7 +96,7 @@ public class DataCenterResourceManagerImpl implements DataCenterResourceManager
public EngineClusterVO loadCluster(String uuid) {
EngineClusterVO cluster = _clusterDao.findByUuid(uuid);
if (cluster == null) {
throw new InvalidParameterValueException("Pod does not exist");
throw new InvalidParameterValueException("Cluster does not exist");
}
return cluster;
}

View File

@ -29,6 +29,7 @@ import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEnti
import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State.Event;
import org.apache.cloudstack.util.CPUArchConverter;
import org.apache.cloudstack.util.HypervisorTypeConverter;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import javax.persistence.Column;
import javax.persistence.Convert;
@ -264,4 +265,11 @@ public class EngineClusterVO implements EngineCluster, Identity {
public PartitionType partitionType() {
return PartitionType.Cluster;
}
@Override
public String toString() {
return String.format("EngineCluster %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
}
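The selected-fields toString keeps every log line identifiable without dumping the whole entity; a rough standalone equivalent using Commons Lang's ToStringBuilder, with illustrative field values:

import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;

public class ClusterToStringSketch {
    private final long id = 7L;
    private final String uuid = "1f2e3d4c-aaaa-bbbb-cccc-1234567890ab";
    private final String name = "cluster-1";

    @Override
    public String toString() {
        // Mirrors reflectOnlySelectedFields(this, "id", "uuid", "name"): identity fields only.
        return "EngineCluster " + new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE)
                .append("id", id)
                .append("uuid", uuid)
                .append("name", name)
                .toString();
    }

    public static void main(String[] args) {
        System.out.println(new ClusterToStringSketch());
    }
}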

View File

@ -43,6 +43,7 @@ import com.cloud.org.Grouping;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.db.StateMachine;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "data_center")
@ -523,4 +524,11 @@ public class EngineDataCenterVO implements EngineDataCenter, Identity {
public DataCenter.Type getType() {
return type;
}
@Override
public String toString() {
return String.format("EngineDataCenter %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
}

View File

@ -38,6 +38,7 @@ import com.cloud.org.Grouping;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.db.StateMachine;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "host_pod_ref")
@ -246,4 +247,11 @@ public class EngineHostPodVO implements EnginePod, Identity {
public State getState() {
return state;
}
@Override
public String toString() {
return String.format("EngineHostPod %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
}

View File

@ -53,6 +53,7 @@ import com.cloud.utils.db.GenericDao;
import com.cloud.utils.db.StateMachine;
import org.apache.cloudstack.util.CPUArchConverter;
import org.apache.cloudstack.util.HypervisorTypeConverter;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "host")
@ -697,7 +698,9 @@ public class EngineHostVO implements EngineHost, Identity {
@Override
public String toString() {
return new StringBuilder("Host[").append("-").append(id).append("-").append(type).append("]").toString();
return String.format("EngineHost %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "type"));
}
public void setHypervisorType(HypervisorType hypervisorType) {

View File

@ -297,7 +297,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase<EngineClusterVO, Long>
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore");
logger.debug("Unable to update dataCenter {} with id={}, as there is no such dataCenter exists in the database anymore", vo, vo.getId());
}
}
return rows > 0;

View File

@ -300,7 +300,7 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase<EngineDataCenterVO,
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore");
logger.debug("Unable to update dataCenter {} with id {}, as there is no such dataCenter exists in the database anymore", vo, vo.getId());
}
}
return rows > 0;

View File

@ -451,7 +451,7 @@ public class EngineHostDaoImpl extends GenericDaoBase<EngineHostVO, Long> implem
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore");
logger.debug("Unable to update dataCenter: {}, as there is no such dataCenter exists in the database anymore", vo);
}
}
return rows > 0;

View File

@ -183,7 +183,7 @@ public class EngineHostPodDaoImpl extends GenericDaoBase<EngineHostPodVO, Long>
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore");
logger.debug("Unable to update dataCenter: {}, as there is no such dataCenter exists in the database anymore", vo);
}
}
return rows > 0;

View File

@ -1248,18 +1248,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
VlanVO vlanVo = _vlanDao.findByNetworkIdAndIpv4(network.getId(), requestedIpv4Address);
if (vlanVo == null) {
throw new InvalidParameterValueException(String.format("Trying to configure a Nic with the requested [IPv4='%s'] but cannot find a Vlan for the [network id='%s']",
requestedIpv4Address, network.getId()));
throw new InvalidParameterValueException(String.format("Trying to configure a Nic with the requested [IPv4='%s'] but cannot find a Vlan for the [network '%s']",
requestedIpv4Address, network));
}
String ipv4Gateway = vlanVo.getVlanGateway();
String ipv4Netmask = vlanVo.getVlanNetmask();
if (!NetUtils.isValidIp4(ipv4Gateway)) {
throw new InvalidParameterValueException(String.format("The [IPv4Gateway='%s'] from [VlanId='%s'] is not valid", ipv4Gateway, vlanVo.getId()));
throw new InvalidParameterValueException(String.format("The [IPv4Gateway='%s'] from [Vlan id=%d uuid=%s] is not valid", ipv4Gateway, vlanVo.getId(), vlanVo.getUuid()));
}
if (!NetUtils.isValidIp4Netmask(ipv4Netmask)) {
throw new InvalidParameterValueException(String.format("The [IPv4Netmask='%s'] from [VlanId='%s'] is not valid", ipv4Netmask, vlanVo.getId()));
throw new InvalidParameterValueException(String.format("The [IPv4Netmask='%s'] from [Vlan id=%d uuid=%s] is not valid", ipv4Netmask, vlanVo.getId(), vlanVo.getUuid()));
}
acquireLockAndCheckIfIpv4IsFree(network, requestedIpv4Address);
@ -1273,7 +1273,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
String macAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId());
nicProfile.setMacAddress(macAddress);
} catch (InsufficientAddressCapacityException e) {
throw new CloudRuntimeException(String.format("Cannot get next available mac address in [network id='%s']", network.getId()), e);
throw new CloudRuntimeException(String.format("Cannot get next available mac address in [network %s]", network), e);
}
}
}
@ -1285,7 +1285,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
IPAddressVO ipVO = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), requestedIpv4Address);
if (ipVO == null) {
throw new InvalidParameterValueException(
String.format("Cannot find IPAddressVO for guest [IPv4 address='%s'] and [network id='%s']", requestedIpv4Address, network.getId()));
String.format("Cannot find IPAddressVO for guest [IPv4 address='%s'] and [network %s]", requestedIpv4Address, network));
}
try {
IPAddressVO lockedIpVO = _ipAddressDao.acquireInLockTable(ipVO.getId());
@ -1489,17 +1489,17 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
final SetupPersistentNetworkAnswer answer = (SetupPersistentNetworkAnswer) _agentMgr.send(host.getId(), cmd);
if (answer == null) {
logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host.getId());
logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host);
clusterToHostsMap.get(host.getClusterId()).remove(host.getId());
continue;
}
if (!answer.getResult()) {
logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails());
logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails());
clusterToHostsMap.get(host.getClusterId()).remove(host.getId());
}
} catch (Exception e) {
logger.warn("Failed to connect to host: {}", host.getName());
logger.warn("Failed to connect to host: {}", host);
}
}
if (clusterToHostsMap.keySet().size() != clusterVOs.size()) {
@ -1526,7 +1526,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
NetworkVO network = _networksDao.findById(networkId);
final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName());
if (isNetworkImplemented(network)) {
logger.debug("Network id={} is already implemented", networkId);
logger.debug("Network {} is already implemented", network);
implemented.set(guru, network);
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_UPDATE, network.getAccountId(), network.getDataCenterId(), network.getId(),
network.getName(), network.getNetworkOfferingId(), null, network.getState().name(), Network.class.getName(), network.getUuid(), true);
@ -1542,11 +1542,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
throw ex;
}
logger.debug("Lock is acquired for network id {} as a part of network implement", networkId);
logger.debug("Lock is acquired for network id {} as a part of network implement", network);
try {
if (isNetworkImplemented(network)) {
logger.debug("Network id={} is already implemented", networkId);
logger.debug("Network {} is already implemented", network);
implemented.set(guru, network);
return implemented;
}
@ -1618,7 +1618,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
_networksDao.releaseFromLockTable(networkId);
logger.debug("Lock is released for network id {} as a part of network implement", networkId);
logger.debug("Lock is released for network {} as a part of network implement", network);
}
}
@ -1743,57 +1743,57 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
_firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.isEgressDefaultPolicy(), true);
}
if (!_firewallMgr.applyFirewallRules(firewallEgressRulesToApply, false, caller)) {
logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id={} restart", networkId);
logger.warn("Failed to reapply firewall Egress rule(s) as a part of network {} restart", network);
success = false;
}
// associate all ip addresses
if (!_ipAddrMgr.applyIpAssociations(network, false)) {
logger.warn("Failed to apply ip addresses as a part of network id {} restart", networkId);
logger.warn("Failed to apply ip addresses as a part of network {} restart", network);
success = false;
}
// apply BGP settings
if (!bgpService.applyBgpPeers(network, false)) {
logger.warn("Failed to apply bpg peers as a part of network id {} restart", networkId);
logger.warn("Failed to apply bpg peers as a part of network {} restart", network);
success = false;
}
// apply static nat
if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) {
logger.warn("Failed to apply static nats a part of network id {} restart", networkId);
if (!_rulesMgr.applyStaticNatsForNetwork(network, false, caller)) {
logger.warn("Failed to apply static nats a part of network {} restart", network);
success = false;
}
// apply firewall rules
final List<FirewallRuleVO> firewallIngressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress);
if (!_firewallMgr.applyFirewallRules(firewallIngressRulesToApply, false, caller)) {
logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id={} restart", networkId);
logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network {} restart", network);
success = false;
}
// apply port forwarding rules
if (!_rulesMgr.applyPortForwardingRulesForNetwork(networkId, false, caller)) {
logger.warn("Failed to reapply port forwarding rule(s) as a part of network id={} restart", networkId);
logger.warn("Failed to reapply port forwarding rule(s) as a part of network {} restart", network);
success = false;
}
// apply static nat rules
if (!_rulesMgr.applyStaticNatRulesForNetwork(networkId, false, caller)) {
logger.warn("Failed to reapply static nat rule(s) as a part of network id={} restart", networkId);
logger.warn("Failed to reapply static nat rule(s) as a part of network {} restart", network);
success = false;
}
// apply public load balancer rules
if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) {
logger.warn("Failed to reapply Public load balancer rules as a part of network id={} restart", networkId);
if (!_lbMgr.applyLoadBalancersForNetwork(network, Scheme.Public)) {
logger.warn("Failed to reapply Public load balancer rules as a part of network {} restart", network);
success = false;
}
// apply internal load balancer rules
if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) {
logger.warn("Failed to reapply internal load balancer rules as a part of network id={} restart", networkId);
if (!_lbMgr.applyLoadBalancersForNetwork(network, Scheme.Internal)) {
logger.warn("Failed to reapply internal load balancer rules as a part of network {} restart", network);
success = false;
}
@ -1803,7 +1803,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
for (final RemoteAccessVpn vpn : vpnsToReapply) {
// Start remote access vpn per ip
if (_vpnMgr.startRemoteAccessVpn(vpn.getServerAddressId(), false) == null) {
logger.warn("Failed to reapply vpn rules as a part of network id={} restart", networkId);
logger.warn("Failed to reapply vpn rules as a part of network {} restart", network);
success = false;
}
}
@ -1811,7 +1811,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
//apply network ACLs
if (!_networkACLMgr.applyACLToNetwork(networkId)) {
logger.warn("Failed to reapply network ACLs as a part of of network id={}", networkId);
logger.warn("Failed to reapply network ACLs as a part of of network {}", network);
success = false;
}
@ -1922,13 +1922,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
long userId = User.UID_SYSTEM;
//remove all PF/Static Nat rules for the network
logger.info("Services: {} are no longer supported in network: {} after applying new network offering: {} removing the related configuration",
services, network.getUuid(), network.getNetworkOfferingId());
services::toString, network::toString, () -> _networkOfferingDao.findById(network.getNetworkOfferingId()));
if (services.contains(Service.StaticNat.getName()) || services.contains(Service.PortForwarding.getName())) {
try {
if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) {
logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId);
logger.debug("Successfully cleaned up portForwarding/staticNat rules for network {}", network);
} else {
logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId);
logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup", network);
}
if (services.contains(Service.StaticNat.getName())) {
//removing static nat configured on ips.
@ -1947,7 +1947,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
});
}
} catch (ResourceUnavailableException ex) {
logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex);
logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup due to resourceUnavailable", network, ex);
}
}
if (services.contains(Service.SourceNat.getName())) {
@ -1966,22 +1966,22 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
if (services.contains(Service.Lb.getName())) {
//remove all LB rules for the network
if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, userId)) {
logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId);
logger.debug("Successfully cleaned up load balancing rules for network {}", network);
} else {
logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId);
logger.warn("Failed to cleanup LB rules as a part of network {} cleanup", network);
}
}
if (services.contains(Service.Firewall.getName())) {
//revoke all firewall rules for the network
try {
if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) {
logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId);
if (_firewallMgr.revokeAllFirewallRulesForNetwork(network, userId, caller)) {
logger.debug("Successfully cleaned up firewallRules rules for network {}", network);
} else {
logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId);
logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network);
}
} catch (ResourceUnavailableException ex) {
logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex);
logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup due to resourceUnavailable", network, ex);
}
}
@ -1991,7 +1991,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
try {
_vpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, true);
} catch (ResourceUnavailableException ex) {
logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network.getUuid(), ex);
logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network, ex);
}
}
}
@ -2088,20 +2088,20 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
@DB
protected void updateNic(final NicVO nic, final long networkId, final int count) {
protected void updateNic(final NicVO nic, final Network network, final int count) {
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) {
_nicDao.update(nic.getId(), nic);
if (nic.getVmType() == VirtualMachine.Type.User) {
logger.debug("Changing active number of nics for network id={} on {}", networkId, count);
_networksDao.changeActiveNicsBy(networkId, count);
logger.debug(String.format("Changing active number of nics for network id=%s on %d", network, count));
_networksDao.changeActiveNicsBy(network.getId(), count);
}
if (nic.getVmType() == VirtualMachine.Type.User
|| nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(networkId).getTrafficType() == TrafficType.Guest) {
_networksDao.setCheckForGc(networkId);
|| nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(network.getId()).getTrafficType() == TrafficType.Guest) {
_networksDao.setCheckForGc(network.getId());
}
}
});
@ -2128,8 +2128,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
for (final NicVO nic : nics) {
final Pair<NetworkGuru, NetworkVO> implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter);
if (implemented == null || implemented.first() == null) {
logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId());
throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId());
NetworkVO network = _networksDao.findById(nic.getNetworkId());
logger.warn("Failed to implement network: {} as a part of preparing nic {}", network, nic);
throw new CloudRuntimeException(String.format("Failed to implement network id=%s as a part preparing nic %s", network, nic));
}
final NetworkVO network = implemented.second();
@ -2194,7 +2195,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
Pair<NetworkVO, VpcVO> networks = getGuestNetworkRouterAndVpcDetails(vmProfile.getId());
setMtuDetailsInVRNic(networks, network, nic);
}
updateNic(nic, network.getId(), 1);
updateNic(nic, network, 1);
final List<Provider> providersToImplement = getNetworkProviders(network.getId());
for (final NetworkElement element : networkElements) {
@ -2299,7 +2300,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
for (final NetworkElement element : networkElements) {
if (providersToImplement.contains(element.getProvider())) {
if (!_networkModel.isProviderEnabledInPhysicalNetwork(_networkModel.getPhysicalNetworkId(network), element.getProvider().getName())) {
throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId());
throw new CloudRuntimeException(String.format("Service provider %s either doesn't exist or is not enabled in physical network: %s", element.getProvider().getName(), _physicalNetworkDao.findById(network.getPhysicalNetworkId())));
}
if (element instanceof NetworkMigrationResponder) {
if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) {
@ -2324,10 +2325,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
if (nic == null && !addedURIs.contains(broadcastUri.toString())) {
//Nic details are not available in DB
//Create nic profile for migration
logger.debug("Creating nic profile for migration. BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), ntwkId, vm.getId());
final NetworkVO network = _networksDao.findById(ntwkId);
final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName());
final NicProfile profile = new NicProfile();
logger.debug("Creating nic profile for migration. BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), network, vm);
profile.setDeviceId(255); //dummyId
profile.setIPv4Address(userIp.getAddress().toString());
profile.setIPv4Netmask(publicIp.getNetmask());
@ -2467,7 +2468,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
applyProfileToNicForRelease(nic, profile);
nic.setState(Nic.State.Allocated);
if (originalState == Nic.State.Reserved) {
updateNic(nic, network.getId(), -1);
updateNic(nic, network, -1);
} else {
_nicDao.update(nic.getId(), nic);
}
@ -2476,7 +2477,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
return new Pair<>(network, profile);
} else {
nic.setState(Nic.State.Allocated);
updateNic(nic, network.getId(), -1);
updateNic(nic, network, -1);
}
}
@ -2513,7 +2514,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
@Override
public void cleanupNics(final VirtualMachineProfile vm) {
logger.debug("Cleaning network for vm: {}", vm.getId());
logger.debug("Cleaning network for vm: {}", vm);
final List<NicVO> nics = _nicDao.listByVmId(vm.getId());
for (final NicVO nic : nics) {
@ -2610,7 +2611,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
_nicDao.remove(nic.getId());
}
logger.debug("Removed nic id={}", nic.getId());
logger.debug("Removed nic {}", nic);
// release assigned IPv6 for Isolated Network VR NIC
if (Type.User.equals(vm.getType()) && GuestType.Isolated.equals(network.getGuestType())
@ -2623,7 +2624,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
//remove the secondary ip addresses corresponding to this nic
if (!removeVmSecondaryIpsOfNic(nic.getId())) {
logger.debug("Removing nic {} secondary ip addresses failed", nic.getId());
logger.debug("Removing nic {} secondary ip addresses failed", nic);
}
}
@ -2837,16 +2838,21 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) &&
_dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) {
throw new InvalidParameterValueException("The VLAN tag for isolated PVLAN " + isolatedPvlan + " is already being used for dynamic vlan allocation for the guest network in zone "
+ zone.getName());
throw new InvalidParameterValueException(String.format(
"The VLAN tag for isolated PVLAN %s is already being used for dynamic vlan allocation for the guest network in zone %s",
isolatedPvlan, zone));
}
if (!UuidUtils.isUuid(vlanId)) {
// For Isolated and L2 networks, don't allow to create network with vlan that already exists in the zone
if (!hasGuestBypassVlanOverlapCheck(bypassVlanOverlapCheck, ntwkOff, isPrivateNetwork)) {
if (_networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), null).size() > 0) {
throw new InvalidParameterValueException("Network with vlan " + vlanId + " already exists or overlaps with other network vlans in zone " + zoneId);
throw new InvalidParameterValueException(String.format(
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
vlanId, zone));
} else if (secondaryUri != null && _networksDao.listByZoneAndUriAndGuestType(zoneId, secondaryUri.toString(), null).size() > 0) {
throw new InvalidParameterValueException("Network with vlan " + isolatedPvlan + " already exists or overlaps with other network vlans in zone " + zoneId);
throw new InvalidParameterValueException(String.format(
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
isolatedPvlan, zone));
} else {
final List<DataCenterVnetVO> dcVnets = _datacenterVnetDao.findVnet(zoneId, BroadcastDomainType.getValue(uri));
//for the network that is created as part of private gateway,
@ -2878,7 +2884,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
// don't allow to creating shared network with given Vlan ID, if there already exists a isolated network or
// shared network with same Vlan ID in the zone
if (!bypassVlanOverlapCheck && _networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), GuestType.Isolated).size() > 0) {
throw new InvalidParameterValueException("There is an existing isolated/shared network that overlaps with vlan id:" + vlanId + " in zone " + zoneId);
throw new InvalidParameterValueException(String.format(
"There is an existing isolated/shared network that overlaps with vlan id:%s in zone %s", vlanId, zone));
}
}
}
@ -2893,7 +2900,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) {
if (networkDomain != null) {
// TBD: NetworkOfferingId and zoneId. Send uuids instead.
throw new InvalidParameterValueException("Domain name change is not supported by network offering id=" + networkOfferingId + " in zone id=" + zoneId);
throw new InvalidParameterValueException(String.format(
"Domain name change is not supported by network offering id=%d in zone %s",
networkOfferingId, zone));
}
} else {
if (networkDomain == null) {
@ -3028,8 +3037,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString()).size() > 0) {
throw new InvalidParameterValueException("Network with vlan " + vlanIdFinal +
" already exists or overlaps with other network pvlans in zone " + zoneId);
throw new InvalidParameterValueException(String.format(
"Network with vlan %s already exists or overlaps with other network pvlans in zone %s",
vlanIdFinal, zone));
}
userNetwork.setBroadcastUri(uri);
@ -3044,9 +3054,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
URI uri = NetUtils.generateUriForPvlan(vlanIdFinal, isolatedPvlan, isolatedPvlanType.toString());
if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString(), isolatedPvlanType).size() > 0) {
throw new InvalidParameterValueException("Network with primary vlan " + vlanIdFinal +
" and secondary vlan " + isolatedPvlan + " type " + isolatedPvlanType +
" already exists or overlaps with other network pvlans in zone " + zoneId);
throw new InvalidParameterValueException(String.format(
"Network with primary vlan %s and secondary vlan %s type %s already exists or overlaps with other network pvlans in zone %s",
vlanIdFinal, isolatedPvlan, isolatedPvlanType, zone));
}
userNetwork.setBroadcastUri(uri);
userNetwork.setBroadcastDomainType(BroadcastDomainType.Pvlan);
@ -3189,7 +3199,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
boolean result = false;
if (success) {
logger.debug("Network id={} is shutdown successfully, cleaning up corresponding resources now.", networkId);
logger.debug("Network {} is shutdown successfully, cleaning up corresponding resources now.", networkFinal);
final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName());
final NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId());
guru.shutdown(profile, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId()));
@ -3250,14 +3260,14 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
}
if (cleanupNeeded) {
cleanupResult = shutdownNetworkResources(network.getId(), context.getAccount(), context.getCaller().getId());
cleanupResult = shutdownNetworkResources(network, context.getAccount(), context.getCaller().getId());
}
} catch (final Exception ex) {
logger.warn("shutdownNetworkRules failed during the network {} shutdown due to", network, ex);
} finally {
// just warn the administrator that the network elements failed to shutdown
if (!cleanupResult) {
logger.warn("Failed to cleanup network id={} resources as a part of shutdownNetwork", network.getId());
logger.warn("Failed to cleanup network {} resources as a part of shutdownNetwork", network);
}
}
@ -3299,15 +3309,15 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
CleanupPersistentNetworkResourceCommand cmd = new CleanupPersistentNetworkResourceCommand(to);
CleanupPersistentNetworkResourceAnswer answer = (CleanupPersistentNetworkResourceAnswer) _agentMgr.send(host.getId(), cmd);
if (answer == null) {
logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host.getId());
logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host);
continue;
}
if (!answer.getResult()) {
logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails());
logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails());
}
} catch (Exception e) {
logger.warn("Failed to cleanup network resources on host: {}", host.getName());
logger.warn("Failed to cleanup network resources on host: {}", host);
}
}
}
@ -3337,7 +3347,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
// Don't allow to delete network via api call when it has vms assigned to it
final int nicCount = getActiveNicsInNetwork(networkId);
if (nicCount > 0) {
logger.debug("The network id={} has active Nics, but shouldn't.", networkId);
logger.debug("The network {} has active Nics, but shouldn't.", network);
// at this point we have already determined that there are no active user vms in network
// if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks
_networksDao.changeActiveNicsBy(networkId, -1 * nicCount);
@ -3367,7 +3377,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
boolean success = true;
if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) {
logger.warn("Unable to delete network id={}: failed to cleanup network resources", networkId);
logger.warn("Unable to delete network {}: failed to cleanup network resources", network);
return false;
}
@ -3396,7 +3406,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
if (success) {
logger.debug("Network id={} is destroyed successfully, cleaning up corresponding resources now.", networkId);
logger.debug("Network {} is destroyed successfully, cleaning up corresponding resources now.", network);
final NetworkVO networkFinal = network;
try {
@ -3495,7 +3505,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
for (final VlanVO vlan : publicVlans) {
VlanVO vlanRange = _configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount);
if (vlanRange == null) {
logger.warn("Failed to delete vlan " + vlan.getId() + ");");
logger.warn("Failed to delete vlan [id: {}, uuid: {}];", vlan.getId(), vlan.getUuid());
result = false;
} else {
deletedPublicVlanRange.add(vlanRange);
@ -3505,16 +3515,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
//cleanup private vlans
final int privateIpAllocCount = _privateIpDao.countAllocatedByNetworkId(networkId);
if (privateIpAllocCount > 0) {
logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", networkId);
logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", network);
result = false;
} else {
_privateIpDao.deleteByNetworkId(networkId);
logger.debug("Deleted ip range for private network id={}", networkId);
logger.debug("Deleted ip range for private network {}", network);
}
// release vlans of user-shared networks without specifyvlan
if (isSharedNetworkWithoutSpecifyVlan(_networkOfferingDao.findById(network.getNetworkOfferingId()))) {
logger.debug("Releasing vnet for the network id={}", network.getId());
logger.debug("Releasing vnet for the network {}", network);
_dcDao.releaseVnet(BroadcastDomainType.getValue(network.getBroadcastUri()), network.getDataCenterId(),
network.getPhysicalNetworkId(), network.getAccountId(), network.getReservationId());
}
@ -3560,10 +3570,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
final Long time = _lastNetworkIdsToFree.remove(networkId);
if (time == null) {
logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", networkId, currentTime);
logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", () -> _networksDao.findById(networkId), () -> currentTime);
stillFree.put(networkId, currentTime);
} else if (time > currentTime - netGcWait) {
logger.debug("Network {} is still free but it's not time to shutdown yet: {}",networkId, time);
logger.debug("Network {} is still free but it's not time to shutdown yet: {}", () -> _networksDao.findById(networkId), time::toString);
stillFree.put(networkId, time);
} else {
shutdownList.add(networkId);
@ -3590,7 +3600,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
shutdownNetwork(networkId, context, false);
} catch (final Exception e) {
logger.warn("Unable to shutdown network: {}", networkId);
logger.warn("Unable to shutdown network: {}", () -> _networksDao.findById(networkId));
}
}
}
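The two hunks above also switch the debug arguments to lambdas. A minimal sketch of the idea, assuming the Log4j 2 Supplier overload of Logger.debug and using a local stand-in for the DAO lookup (all names and values here are illustrative):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    static String expensiveLookup(long id) {
        // Stand-in for a DAO call such as _networksDao.findById(networkId).
        return "Network [id: " + id + "]";
    }

    public static void main(String[] args) {
        long networkId = 42L;
        long time = System.currentTimeMillis();
        // With the Supplier<?>... overload, Log4j 2 invokes the lambdas only when DEBUG is enabled,
        // so expensiveLookup() is skipped entirely at quieter log levels.
        logger.debug("Network {} is still free, not time to shut it down yet: {}",
                () -> expensiveLookup(networkId), () -> time);
        // An eager call would always pay for the lookup, even when the message is never written:
        // logger.debug("Network {} is still free, not time to shut it down yet: {}", expensiveLookup(networkId), time);
    }
}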
@ -3630,7 +3640,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
boolean restartRequired = false;
final NetworkVO network = _networksDao.findById(networkId);
logger.debug("Restarting network {}...", networkId);
logger.debug("Restarting network {}...", network);
final ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount);
final NetworkOffering offering = _networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId());
@ -3985,51 +3995,51 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
//remove all PF/Static Nat rules for the network
try {
if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, callerUserId, caller)) {
logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId);
logger.debug("Successfully cleaned up portForwarding/staticNat rules for network {}", network);
} else {
success = false;
logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId);
logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup", network);
}
} catch (final ResourceUnavailableException ex) {
success = false;
// shouldn't even come here as network is being cleaned up after all network elements are shutdown
logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex);
logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup due to resourceUnavailable", network, ex);
}
//remove all LB rules for the network
if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, callerUserId)) {
logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId);
logger.debug("Successfully cleaned up load balancing rules for network {}", network);
} else {
// shouldn't even come here as network is being cleaned up after all network elements are shutdown
success = false;
logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId);
logger.warn("Failed to cleanup LB rules as a part of network {} cleanup", network);
}
//revoke all firewall rules for the network
try {
if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) {
logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId);
if (_firewallMgr.revokeAllFirewallRulesForNetwork(network, callerUserId, caller)) {
logger.debug("Successfully cleaned up firewallRules rules for network {}", network);
} else {
success = false;
logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId);
logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network);
}
} catch (final ResourceUnavailableException ex) {
success = false;
// shouldn't even come here as network is being cleaned up after all network elements are shutdown
logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex);
logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup due to resourceUnavailable", network, ex);
}
//revoke all network ACLs for network
try {
if (_networkACLMgr.revokeACLItemsForNetwork(networkId)) {
logger.debug("Successfully cleaned up NetworkACLs for network id={}", networkId);
logger.debug("Successfully cleaned up NetworkACLs for network {}", network);
} else {
success = false;
logger.warn("Failed to cleanup NetworkACLs as a part of network id={} cleanup", networkId);
logger.warn("Failed to cleanup NetworkACLs as a part of network {} cleanup", network);
}
} catch (final ResourceUnavailableException ex) {
success = false;
logger.warn("Failed to cleanup Network ACLs as a part of network id={} cleanup due to resourceUnavailable ", networkId, ex);
logger.warn("Failed to cleanup Network ACLs as a part of network {} cleanup due to resourceUnavailable ", network, ex);
}
//release all ip addresses
@ -4047,7 +4057,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
logger.debug("Portable IP address {} is no longer associated with any network", ipToRelease);
}
} else {
_vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId());
_vpcMgr.unassignIPFromVpcNetwork(ipToRelease, network);
}
}
@ -4065,14 +4075,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
return success;
}
private boolean shutdownNetworkResources(final long networkId, final Account caller, final long callerUserId) {
private boolean shutdownNetworkResources(final Network network, final Account caller, final long callerUserId) {
// This method cleans up network rules on the backend w/o touching them in the DB
boolean success = true;
final Network network = _networksDao.findById(networkId);
// Mark all PF rules as revoked and apply them on the backend (not in the DB)
final List<PortForwardingRuleVO> pfRules = _portForwardingRulesDao.listByNetwork(networkId);
logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), networkId);
final List<PortForwardingRuleVO> pfRules = _portForwardingRulesDao.listByNetwork(network.getId());
logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), network);
for (final PortForwardingRuleVO pfRule : pfRules) {
logger.trace("Marking pf rule {} with Revoke state", pfRule);
@ -4090,9 +4099,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
// Mark all static rules as revoked and apply them on the backend (not in the DB)
final List<FirewallRuleVO> firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat);
final List<FirewallRuleVO> firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(network.getId(), Purpose.StaticNat);
final List<StaticNatRule> staticNatRules = new ArrayList<StaticNatRule>();
logger.debug("Releasing {} static nat rules for network id={} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), networkId);
logger.debug("Releasing {} static nat rules for network {} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), network);
for (final FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) {
logger.trace("Marking static nat rule {} with Revoke state", firewallStaticNatRule);
@ -4100,7 +4109,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
final FirewallRuleVO ruleVO = _firewallDao.findById(firewallStaticNatRule.getId());
if (ip == null || !ip.isOneToOneNat() || ip.getAssociatedWithVmId() == null) {
throw new InvalidParameterValueException("Source ip address of the rule id=" + firewallStaticNatRule.getId() + " is not static nat enabled");
throw new InvalidParameterValueException(String.format("Source ip address of the rule %s is not static nat enabled", firewallStaticNatRule));
}
//String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), firewallStaticNatRule.getNetworkId());
@ -4119,7 +4128,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
try {
if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Public)) {
if (!_lbMgr.revokeLoadBalancersForNetwork(network, Scheme.Public)) {
logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules");
success = false;
}
@ -4129,7 +4138,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
try {
if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Internal)) {
if (!_lbMgr.revokeLoadBalancersForNetwork(network, Scheme.Internal)) {
logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules");
success = false;
}
@ -4139,8 +4148,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
// revoke all firewall rules for the network w/o applying them on the DB
final List<FirewallRuleVO> firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress);
logger.debug("Releasing firewall ingress rules for network id={} as a part of shutdownNetworkRules", firewallRules.size(), networkId);
final List<FirewallRuleVO> firewallRules = _firewallDao.listByNetworkPurposeTrafficType(network.getId(), Purpose.Firewall, FirewallRule.TrafficType.Ingress);
logger.debug("Releasing firewall ingress rules for network {} as a part of shutdownNetworkRules", firewallRules.size(), network);
for (final FirewallRuleVO firewallRule : firewallRules) {
logger.trace("Marking firewall ingress rule {} with Revoke state", firewallRule);
@ -4157,8 +4166,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
success = false;
}
final List<FirewallRuleVO> firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress);
logger.debug("Releasing {} firewall egress rules for network id={} as a part of shutdownNetworkRules", firewallEgressRules.size(), networkId);
final List<FirewallRuleVO> firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(network.getId(), Purpose.Firewall, FirewallRule.TrafficType.Egress);
logger.debug("Releasing {} firewall egress rules for network {} as a part of shutdownNetworkRules", firewallEgressRules.size(), network);
try {
// delete default egress rule
@ -4166,7 +4175,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall)
&& (network.getGuestType() == Network.GuestType.Isolated || network.getGuestType() == Network.GuestType.Shared && zone.getNetworkType() == NetworkType.Advanced)) {
// add default egress rule to accept the traffic
_firewallMgr.applyDefaultEgressFirewallRule(network.getId(), _networkModel.getNetworkEgressDefaultPolicy(networkId), false);
_firewallMgr.applyDefaultEgressFirewallRule(network.getId(), _networkModel.getNetworkEgressDefaultPolicy(network.getId()), false);
}
} catch (final ResourceUnavailableException ex) {
@ -4190,11 +4199,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
if (network.getVpcId() != null) {
logger.debug("Releasing Network ACL Items for network id={} as a part of shutdownNetworkRules", networkId);
logger.debug("Releasing Network ACL Items for network {} as a part of shutdownNetworkRules", network);
try {
//revoke all Network ACLs for the network w/o applying them in the DB
if (!_networkACLMgr.revokeACLItemsForNetwork(networkId)) {
if (!_networkACLMgr.revokeACLItemsForNetwork(network.getId())) {
logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules");
success = false;
}
@ -4206,13 +4215,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
//release all static nats for the network
if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) {
logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id {}", networkId);
if (!_rulesMgr.applyStaticNatForNetwork(network, false, caller, true)) {
logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network {}", network);
success = false;
}
// Get all ip addresses, mark as releasing and release them on the backend
final List<IPAddressVO> userIps = _ipAddressDao.listByAssociatedNetwork(networkId, null);
final List<IPAddressVO> userIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null);
final List<PublicIp> publicIpsToRelease = new ArrayList<PublicIp>();
if (userIps != null && !userIps.isEmpty()) {
for (final IPAddressVO userIp : userIps) {
@ -4310,12 +4319,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
final CheckNetworkAnswer answer = (CheckNetworkAnswer) _agentMgr.easySend(hostId, nwCmd);
if (answer == null) {
logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host.getId());
throw new ConnectionException(true, "Unable to get an answer to the CheckNetworkCommand from agent: " + host.getId());
logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host);
throw new ConnectionException(true, String.format("Unable to get an answer to the CheckNetworkCommand from agent: %s", host));
}
if (!answer.getResult()) {
logger.warn("Unable to setup agent {} due to {}", hostId, answer.getDetails());
logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails());
final String msg = "Incorrect Network setup on agent, Reinitialize agent after network names are setup, details : " + answer.getDetails();
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, host.getPodId(), msg, msg);
throw new ConnectionException(true, msg);
@ -4471,8 +4480,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
if (prepare) {
final Pair<NetworkGuru, NetworkVO> implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter);
if (implemented == null || implemented.first() == null) {
logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId());
throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId());
logger.warn("Failed to implement network {} as a part of preparing nic {}", network, nic);
throw new CloudRuntimeException(String.format("Failed to implement network %s as a part preparing nic %s", network, nic));
}
nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second());
logger.debug("Nic is prepared successfully for vm {} in network {}", vm, network);
@ -4588,18 +4597,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
final List<Provider> providers = getProvidersForServiceInNetwork(network, service);
//Only support one provider now
if (providers == null) {
logger.error("Cannot find {} provider for network {}", service.getName(), network.getId());
logger.error("Cannot find {} provider for network {}", service.getName(), network);
return null;
}
if (providers.size() != 1 && service != Service.Lb) {
//support more than one LB providers only
logger.error("Found {} {} providers for network! {}", providers.size(), service.getName(), network.getId());
logger.error("Found {} {} providers for network! {}", providers.size(), service.getName(), network);
return null;
}
for (final Provider provider : providers) {
final NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName());
logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network.getId());
logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network);
elements.add(element);
}
return elements;
@ -4693,7 +4702,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
@Override
public Pair<NicProfile, Integer> importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter dataCenter, final boolean forced)
throws ConcurrentOperationException, InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException {
logger.debug("Allocating nic for vm {} in network {} during import", vm.getUuid(), network);
logger.debug("Allocating nic for vm {} in network {} during import", vm, network);
String selectedIp = null;
if (ipAddresses != null && StringUtils.isNotEmpty(ipAddresses.getIp4Address())) {
if (ipAddresses.getIp4Address().equals("auto")) {
@ -4743,7 +4752,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
int count = 1;
if (vo.getVmType() == VirtualMachine.Type.User) {
logger.debug("Changing active number of nics for network id={} on {}", network.getUuid(), count);
logger.debug("Changing active number of nics for network {} on {}", network, count);
_networksDao.changeActiveNicsBy(network.getId(), count);
}
if (vo.getVmType() == VirtualMachine.Type.User
@ -4807,16 +4816,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
private String generateNewMacAddressIfForced(Network network, String macAddress, boolean forced) {
if (!forced) {
throw new CloudRuntimeException("NIC with MAC address " + macAddress + " exists on network with ID " + network.getUuid() +
throw new CloudRuntimeException("NIC with MAC address " + macAddress + " exists on network " + network +
" and forced flag is disabled");
}
try {
logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network.getName(), macAddress);
logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network, macAddress);
String newMacAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId());
logger.debug("Successfully generated the mac address {}, using it instead of the conflicting address {}", newMacAddress, macAddress);
return newMacAddress;
} catch (InsufficientAddressCapacityException e) {
String msg = String.format("Could not generate a new mac address on network %s", network.getName());
String msg = String.format("Could not generate a new mac address on network %s", network);
logger.error(msg);
throw new CloudRuntimeException(msg);
}
@ -4824,7 +4833,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
@Override
public void unmanageNics(VirtualMachineProfile vm) {
logger.debug("Unmanaging NICs for VM: {}", vm.getId());
logger.debug("Unmanaging NICs for VM: {}", vm);
VirtualMachine virtualMachine = vm.getVirtualMachine();
final List<NicVO> nics = _nicDao.listByVmId(vm.getId());

View File

@ -151,7 +151,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains, childTemplates);
if (files.isEmpty()) {
return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore.getId()), migrationPolicy.toString(), true);
return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore), migrationPolicy.toString(), true);
}
Map<Long, Pair<Long, Long>> storageCapacities = new Hashtable<>();
for (Long storeId : destDatastores) {
@ -159,7 +159,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
}
storageCapacities.put(srcDataStoreId, new Pair<>(null, null));
if (migrationPolicy == MigrationPolicy.COMPLETE) {
logger.debug("Setting source image store: {} to read-only", srcDatastore.getId());
logger.debug("Setting source image store: {} to read-only", srcDatastore);
storageService.updateImageStoreStatus(srcDataStoreId, true);
}
@ -309,8 +309,9 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
message += "Image stores have been attempted to be balanced";
success = true;
} else {
message = "Files not completely migrated from "+ srcDatastore.getId() + ". Datastore (source): " + srcDatastore.getId() + "has equal or more free space than destination."+
" If you want to continue using the Image Store, please change the read-only status using 'update imagestore' command";
message = String.format("Files not completely migrated from %s. Source datastore " +
"has equal or more free space than destination. If you want to continue using the Image Store, " +
"please change the read-only status using 'update imagestore' command", srcDatastore);
success = false;
}
} else {
@ -353,7 +354,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
task.setTemplateChain(templateChains);
}
futures.add((executor.submit(task)));
logger.debug(String.format("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()));
logger.debug("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid());
return storageCapacities;
}
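The last hunk above removes a String.format wrapper from a parameterized logger call. A minimal sketch of why the wrapper was wrong; the type and uuid values are illustrative, only the logging calls mirror the change:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class MigrationLogSketch {
    private static final Logger logger = LogManager.getLogger(MigrationLogSketch.class);

    public static void main(String[] args) {
        String type = "TEMPLATE";                                     // illustrative values
        String uuid = java.util.UUID.randomUUID().toString();
        // Broken: String.format knows nothing about "{}", so the braces stay literal and the
        // extra arguments are silently ignored -> "Migration of {}: {} is initiated."
        logger.debug(String.format("Migration of {}: {} is initiated.", type, uuid));
        // Fixed: let the logger substitute the placeholders (and skip the work when DEBUG is off).
        logger.debug("Migration of {}: {} is initiated.", type, uuid);
    }
}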

View File

@ -885,7 +885,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
Account owner, long deviceId, String configurationId) {
assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template.";
Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId());
Long size = _tmpltMgr.getTemplateSize(template, vm.getDataCenterId());
if (rootDisksize != null) {
if (template.isDeployAsIs()) {
// Volume size specified from template deploy-as-is
@ -994,7 +994,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (configurationDetail != null) {
configurationId = configurationDetail.getValue();
}
templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template.getId(), DataStoreRole.Image, configurationId);
templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template, DataStoreRole.Image, configurationId);
if (CollectionUtils.isNotEmpty(templateAsIsDisks)) {
templateAsIsDisks = templateAsIsDisks.stream()
.filter(x -> !x.isIso())

View File

@ -47,7 +47,7 @@ public class AgentManagerImplTest {
host = new HostVO("some-Uuid");
host.setDataCenterId(1L);
cmds = new StartupCommand[]{new StartupRoutingCommand()};
attache = new ConnectedAgentAttache(null, 1L, "kvm-attache", null, false);
attache = new ConnectedAgentAttache(null, 1L, "uuid", "kvm-attache", null, false);
hostDao = Mockito.mock(HostDao.class);
storagePoolMonitor = Mockito.mock(Listener.class);

View File

@ -31,8 +31,8 @@ public class ConnectedAgentAttacheTest {
Link link = mock(Link.class);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link, false);
ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, null, link, false);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false);
ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false);
assertTrue(agentAttache1.equals(agentAttache2));
}
@ -42,7 +42,7 @@ public class ConnectedAgentAttacheTest {
Link link = mock(Link.class);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link, false);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false);
assertFalse(agentAttache1.equals(null));
}
@ -53,8 +53,8 @@ public class ConnectedAgentAttacheTest {
Link link1 = mock(Link.class);
Link link2 = mock(Link.class);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link1, false);
ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, null, link2, false);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link1, false);
ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, "uuid", null, link2, false);
assertFalse(agentAttache1.equals(agentAttache2));
}
@ -64,8 +64,8 @@ public class ConnectedAgentAttacheTest {
Link link1 = mock(Link.class);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, null, link1, false);
ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 2, null, link1, false);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, "uuid", null, link1, false);
ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 2, "uuid", null, link1, false);
assertFalse(agentAttache1.equals(agentAttache2));
}
@ -75,7 +75,7 @@ public class ConnectedAgentAttacheTest {
Link link1 = mock(Link.class);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, null, link1, false);
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, "uuid", null, link1, false);
assertFalse(agentAttache1.equals("abc"));
}
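Judging only from the updated call sites in these tests, the attache constructors now appear to take the host uuid between the id and the attache name. A minimal sketch of building one under that assumption; the parameter order is inferred from the tests above, not confirmed by the surrounding text:

// Assumed parameter order, inferred from the updated tests: (agentMgr, id, uuid, name, link, maintenance).
Link link = mock(Link.class);
ConnectedAgentAttache attache =
        new ConnectedAgentAttache(null, 1L, UUID.randomUUID().toString(), "kvm-attache", link, false);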

View File

@ -26,6 +26,8 @@ import org.mockito.junit.MockitoJUnitRunner;
import com.cloud.resource.ServerResource;
import java.util.UUID;
@RunWith(MockitoJUnitRunner.class)
public class DirectAgentAttacheTest {
@Mock
@ -36,9 +38,11 @@ public class DirectAgentAttacheTest {
long _id = 0L;
String _uuid = UUID.randomUUID().toString();
@Before
public void setup() {
directAgentAttache = new DirectAgentAttache(_agentMgr, _id, "myDirectAgentAttache", _resource, false);
directAgentAttache = new DirectAgentAttache(_agentMgr, _id, _uuid, "myDirectAgentAttache", _resource, false);
MockitoAnnotations.initMocks(directAgentAttache);
}

View File

@ -31,6 +31,7 @@ import javax.persistence.Table;
import com.cloud.org.Grouping;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "host_pod_ref")
@ -197,4 +198,11 @@ public class HostPodVO implements Pod {
public void setUuid(String uuid) {
this.uuid = uuid;
}
@Override
public String toString() {
return String.format("HostPod %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
}
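This and the VO changes that follow converge on the same toString() pattern. A minimal sketch of that pattern, assuming only that reflectOnlySelectedFields renders the named fields of the given object (the exact output style is whatever the utility is configured with); ExampleVO itself is illustrative:

import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;

public class ExampleVO {
    private long id;
    private String uuid;
    private String name;

    @Override
    public String toString() {
        // Pick a small, stable, identifying subset of fields instead of hand-built StringBuilder chains.
        return String.format("Example %s",
                ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name"));
    }
}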

View File

@ -29,6 +29,7 @@ import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "vlan")
@ -192,24 +193,11 @@ public class VlanVO implements Vlan {
@Override
public String toString() {
if (toString == null) {
toString =
new StringBuilder("Vlan[").append(vlanTag)
.append("|")
.append(vlanGateway)
.append("|")
.append(vlanNetmask)
.append("|")
.append(ip6Gateway)
.append("|")
.append(ip6Cidr)
.append("|")
.append(ipRange)
.append("|")
.append(ip6Range)
.append("|")
.append(networkId)
.append("]")
.toString();
toString = String.format("Vlan %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid",
"vlanTag", "vlanGateway", "vlanNetmask", "ip6Gateway", "ip6Cidr",
"ipRange", "ip6Range", "networkId"));
}
return toString;
}

View File

@ -28,6 +28,7 @@ import javax.persistence.Table;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.Encrypt;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
/**
* VmwareDatacenterVO contains information of Vmware Datacenter associated with a CloudStack zone.
@ -125,7 +126,9 @@ public class VmwareDatacenterVO implements VmwareDatacenter {
@Override
public String toString() {
return new StringBuilder("VmwareDatacenter[").append(guid).append("]").toString();
return String.format("VmwareDatacenter %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "guid"));
}
@Override

View File

@ -26,6 +26,7 @@ import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
@ -206,7 +207,9 @@ public class DomainVO implements Domain {
@Override
public String toString() {
return new StringBuilder("Domain:").append(id).append(path).toString();
return String.format("Domain %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "path"));
}
@Override

View File

@ -712,7 +712,7 @@ public class HostVO implements Host {
@Override
public String toString() {
return String.format("Host %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "type"));
return String.format("Host %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "type"));
}
public void setHypervisorType(HypervisorType hypervisorType) {

View File

@ -27,6 +27,7 @@ import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.Table;
import com.cloud.network.rules.HealthCheckPolicy;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "load_balancer_healthcheck_policies")
@ -169,4 +170,11 @@ public class LBHealthCheckPolicyVO implements HealthCheckPolicy {
public boolean isDisplay() {
return display;
}
@Override
public String toString() {
return String.format("LBHealthCheckPolicy %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "pingPath"));
}
}

View File

@ -33,6 +33,7 @@ import javax.persistence.TemporalType;
import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "autoscale_policies")
@ -92,7 +93,9 @@ public class AutoScalePolicyVO implements AutoScalePolicy, InternalIdentity {
@Override
public String toString() {
return new StringBuilder("AutoScalePolicy[").append("id-").append(id).append("]").toString();
return String.format("AutoScalePolicy %s.",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
@Override

View File

@ -32,6 +32,7 @@ import javax.persistence.TemporalType;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.lang3.StringUtils;
import com.cloud.utils.db.GenericDao;
@ -126,11 +127,9 @@ public class AutoScaleVmGroupVO implements AutoScaleVmGroup, InternalIdentity, I
@Override
public String toString() {
return new StringBuilder("AutoScaleVmGroupVO[").append("id=").append(id)
.append("|name=").append(name)
.append("|loadBalancerId=").append(loadBalancerId)
.append("|profileId=").append(profileId)
.append("]").toString();
return String.format("AutoScaleVmGroup %s.",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "loadBalancerId", "profileId"));
}
@Override

View File

@ -37,6 +37,7 @@ import javax.persistence.Table;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
@ -126,7 +127,9 @@ public class AutoScaleVmProfileVO implements AutoScaleVmProfile, Identity, Inter
@Override
public String toString() {
return new StringBuilder("AutoScaleVMProfileVO[").append("id").append(id).append("-").append("templateId").append("-").append(templateId).append("]").toString();
return String.format("AutoScaleVMProfile %s.",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "templateId"));
}
@Override

View File

@ -33,6 +33,7 @@ import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "conditions")
@ -91,7 +92,9 @@ public class ConditionVO implements Condition, Identity, InternalIdentity {
@Override
public String toString() {
return new StringBuilder("Condition[").append("id-").append(id).append("]").toString();
return String.format("Condition %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid"));
}
@Override

View File

@ -34,6 +34,7 @@ import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.network.Network;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "counter")
@ -79,7 +80,9 @@ public class CounterVO implements Counter, Identity, InternalIdentity {
@Override
public String toString() {
return new StringBuilder("Counter[").append("id-").append(id).append("]").toString();
return String.format("Counter %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
@Override

View File

@ -30,6 +30,7 @@ import javax.persistence.Table;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.network.ExternalNetworkDeviceManager;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
/**
* ExternalLoadBalancerDeviceVO contains information on external load balancer devices (F5/Netscaler VPX,MPX,SDX) added into a deployment
@ -244,4 +245,11 @@ public class ExternalLoadBalancerDeviceVO implements InternalIdentity, Identity
public void setUuid(String uuid) {
this.uuid = uuid;
}
@Override
public String toString() {
return String.format("ExternalLoadBalancerDevice %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "providerName"));
}
}

View File

@ -33,6 +33,7 @@ import javax.persistence.TemporalType;
import com.cloud.network.IpAddress;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.net.Ip;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
/**
* A bean representing a public IP Address
@ -268,7 +269,9 @@ public class IPAddressVO implements IpAddress {
@Override
public String toString() {
return new StringBuilder("Ip[").append(address).append("-").append(dataCenterId).append("]").toString();
return String.format("IPAddress %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "dataCenterId", "address"));
}
@Override

View File

@ -33,6 +33,7 @@ import javax.persistence.Table;
import com.cloud.network.rules.StickinessPolicy;
import com.cloud.utils.Pair;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "load_balancer_stickiness_policies")
@ -162,4 +163,11 @@ public class LBStickinessPolicyVO implements StickinessPolicy {
public boolean isDisplay() {
return display;
}
@Override
public String toString() {
return String.format("LBStickinessPolicy %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "methodName"));
}
}

View File

@ -27,6 +27,7 @@ import javax.persistence.Table;
import com.cloud.network.rules.FirewallRuleVO;
import com.cloud.network.rules.LoadBalancer;
import com.cloud.utils.net.NetUtils;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
/**
* This VO represents Public Load Balancer
@ -136,4 +137,11 @@ public class LoadBalancerVO extends FirewallRuleVO implements LoadBalancer {
public String getCidrList() {
return cidrList;
}
@Override
public String toString() {
return String.format("LoadBalancer %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "purpose", "state"));
}
}

View File

@ -35,6 +35,7 @@ import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.network.Network.Service;
import com.cloud.network.PhysicalNetworkServiceProvider;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "physical_network_service_providers")
@ -109,6 +110,13 @@ public class PhysicalNetworkServiceProviderVO implements PhysicalNetworkServiceP
this.uuid = UUID.randomUUID().toString();
}
@Override
public String toString() {
return String.format("PhysicalNetworkServiceProvider %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "providerName"));
}
@Override
public long getId() {
return id;

View File

@ -37,6 +37,7 @@ import com.cloud.network.PhysicalNetwork;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
/**
* NetworkConfigurationVO contains information about a specific physical network.
@ -248,4 +249,11 @@ public class PhysicalNetworkVO implements PhysicalNetwork {
public String getName() {
return name;
}
@Override
public String toString() {
return String.format("PhysicalNetwork %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
}

View File

@ -18,6 +18,7 @@ package com.cloud.network.dao;
import com.cloud.network.RemoteAccessVpn;
import com.cloud.utils.db.Encrypt;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import javax.persistence.Column;
import javax.persistence.Entity;
@ -86,6 +87,11 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn {
this.vpcId = vpcId;
}
@Override
public String toString() {
return String.format("RemoteAccessVpn %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid"));
}
@Override
public State getState() {
return state;

View File

@ -29,6 +29,7 @@ import javax.persistence.Table;
import com.cloud.network.Site2SiteCustomerGateway;
import com.cloud.utils.db.Encrypt;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@ -110,6 +111,13 @@ public class Site2SiteCustomerGatewayVO implements Site2SiteCustomerGateway {
this.ikeVersion = ikeVersion;
}
@Override
public String toString() {
return String.format("Site2SiteCustomerGateway %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
@Override
public long getId() {
return id;

View File

@ -32,6 +32,7 @@ import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.network.Site2SiteVpnConnection;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@ -182,4 +183,11 @@ public class Site2SiteVpnConnectionVO implements Site2SiteVpnConnection, Interna
public String getName() {
return null;
}
@Override
public String toString() {
return String.format("Site2SiteVpnConnection %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "state"));
}
}

View File

@ -28,6 +28,7 @@ import javax.persistence.Table;
import com.cloud.network.Site2SiteVpnGateway;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@ -70,6 +71,13 @@ public class Site2SiteVpnGatewayVO implements Site2SiteVpnGateway {
this.domainId = domainId;
}
@Override
public String toString() {
return String.format("Site2SiteVpnGateway %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
@Override
public long getId() {
return id;

View File

@ -36,6 +36,7 @@ import javax.persistence.Transient;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.net.NetUtils;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "firewall_rules")
@ -258,7 +259,9 @@ public class FirewallRuleVO implements FirewallRule {
@Override
public String toString() {
return new StringBuilder("Rule[").append(id).append("-").append(purpose).append("-").append(state).append("]").toString();
return String.format("FirewallRule %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "networkId", "purpose", "state"));
}
@Override

View File

@ -16,6 +16,8 @@
// under the License.
package com.cloud.network.security;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import java.util.UUID;
import javax.persistence.Column;
@ -89,6 +91,13 @@ public class SecurityGroupRuleVO implements SecurityRule {
}
}
@Override
public String toString() {
return String.format("SecurityGroupRule %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "type"));
}
@Override
public long getId() {
return id;

View File

@ -16,6 +16,8 @@
// under the License.
package com.cloud.network.security;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import java.util.UUID;
import javax.persistence.Column;
@ -60,6 +62,13 @@ public class SecurityGroupVO implements SecurityGroup {
uuid = UUID.randomUUID().toString();
}
@Override
public String toString() {
return String.format("SecurityGroup %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
@Override
public long getId() {
return id;

View File

@ -35,6 +35,7 @@ import javax.persistence.Transient;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "network_acl_item")
@ -168,7 +169,9 @@ public class NetworkACLItemVO implements NetworkACLItem, Cloneable {
@Override
public String toString() {
return new StringBuilder("Rule[").append(id).append("-").append("NetworkACL").append("-").append(state).append("]").toString();
return String.format("NetworkACLItem %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "aclId", "state"));
}
@Override

View File

@ -89,7 +89,7 @@ public class NetworkACLVO implements NetworkACL {
@Override
public String toString() {
return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "name", "vpcId");
return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "vpcId");
}
public void setUuid(String uuid) {

View File

@ -29,6 +29,7 @@ import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "vpc_gateways")
@ -163,9 +164,9 @@ public class VpcGatewayVO implements VpcGateway {
@Override
public String toString() {
StringBuilder buf = new StringBuilder("VpcGateway[");
buf.append(id).append("|").append(ip4Address.toString()).append("|").append(vpcId).append("]");
return buf.toString();
return String.format("VpcGateway %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "vpcId", "ip4Address"));
}
@Override

View File

@ -30,6 +30,7 @@ import javax.persistence.Table;
import com.cloud.offering.NetworkOffering;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "vpc_offerings")
@ -180,8 +181,9 @@ public class VpcOfferingVO implements VpcOffering {
@Override
public String toString() {
StringBuilder buf = new StringBuilder("[VPC Offering [");
return buf.append(id).append("-").append(name).append("]").toString();
return String.format("VPCOffering %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
public void setName(String name) {

View File

@ -28,6 +28,7 @@ import javax.persistence.Table;
import javax.persistence.Transient;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "vpc")
@ -210,8 +211,9 @@ public class VpcVO implements Vpc {
@Override
public String toString() {
final StringBuilder buf = new StringBuilder("[VPC [");
return buf.append(id).append("-").append(name).append("]").toString();
return String.format("VPC %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name"));
}
@Override

View File

@ -32,6 +32,7 @@ import com.cloud.network.Network;
import com.cloud.network.Networks.TrafficType;
import com.cloud.offering.NetworkOffering;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
@Entity
@Table(name = "network_offerings")
@ -471,8 +472,8 @@ public class NetworkOfferingVO implements NetworkOffering {
@Override
public String toString() {
StringBuilder buf = new StringBuilder("[Network Offering [");
return buf.append(id).append("-").append(trafficType).append("-").append(name).append("]").toString();
return String.format("NetworkOffering %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "trafficType"));
}
@Override

Some files were not shown because too many files have changed in this diff