completed the new vmsync TODOs in the code.

removed old vmsync logic
Anthony Xu 2014-07-28 12:45:14 -07:00
parent acc9c79370
commit 330c4ba578
36 changed files with 235 additions and 2395 deletions

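In the new model, agents no longer push VirtualMachine.State deltas; they report raw VM power state and let the management server's sync manager reconcile. A minimal reporting-side sketch, assembled only from types and calls that appear in this diff (the enclosing helper method and its fields are assumptions for illustration):

    // Hypothetical helper on a host resource; vmDao and hostId are assumed fields.
    protected PingCommand buildStatusPing(long hostId) {
        Map<String, HostVmStateReportEntry> report = new HashMap<String, HostVmStateReportEntry>();
        for (VMInstanceVO vm : vmDao.listByHostId(hostId)) {
            // PowerState (PowerOn/PowerOff) replaces the old VirtualMachine.State report
            report.put(vm.getInstanceName(),
                    new HostVmStateReportEntry(vm.getPowerState(), "host-" + hostId));
        }
        // The legacy Map<String, State> "newStates" argument is gone from the constructor.
        return new PingRoutingCommand(Host.Type.Routing, hostId, report);
    }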
View File

@@ -32,7 +32,6 @@ import com.cloud.agent.api.Command;
import com.cloud.agent.api.PingCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StartupRoutingCommand.VmState;
import com.cloud.agent.api.StartupStorageCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.host.Host;
@@ -147,13 +146,11 @@ public class DummyResource implements ServerResource {
@Override
public StartupCommand[] initialize() {
Map<String, VmState> changes = null;
final List<Object> info = getHostInfo();
final StartupRoutingCommand cmd =
new StartupRoutingCommand((Integer)info.get(0), (Long)info.get(1), (Long)info.get(2), (Long)info.get(4), (String)info.get(3), HypervisorType.KVM,
RouterPrivateIpStrategy.HostLocal, changes, null);
RouterPrivateIpStrategy.HostLocal);
fillNetworkInformation(cmd);
cmd.getHostDetails().putAll(getVersionStrings());
cmd.setCluster(getConfiguredProperty("cluster", "1"));

View File

@@ -19,23 +19,23 @@
package com.cloud.agent.api;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachine.PowerState;
public class CheckVirtualMachineAnswer extends Answer {
Integer vncPort;
State state;
PowerState state;
protected CheckVirtualMachineAnswer() {
}
public CheckVirtualMachineAnswer(CheckVirtualMachineCommand cmd, State state, Integer vncPort, String detail) {
public CheckVirtualMachineAnswer(CheckVirtualMachineCommand cmd, PowerState state, Integer vncPort, String detail) {
super(cmd, true, detail);
this.state = state;
this.vncPort = vncPort;
}
public CheckVirtualMachineAnswer(CheckVirtualMachineCommand cmd, State state, Integer vncPort) {
public CheckVirtualMachineAnswer(CheckVirtualMachineCommand cmd, PowerState state, Integer vncPort) {
this(cmd, state, vncPort, null);
}
@@ -47,7 +47,7 @@ public class CheckVirtualMachineAnswer extends Answer {
return vncPort;
}
public State getState() {
public PowerState getState() {
return state;
}
}

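CheckVirtualMachineAnswer now speaks in PowerState terms, so resource-side callers map to PowerOn/PowerOff rather than Running/Stopped. A sketch of a reply under that assumption (only the constructor signature is from this diff; the poweredOn flag and port value are illustrative):

    // Hypothetical conversion helper for a host resource.
    protected CheckVirtualMachineAnswer toAnswer(CheckVirtualMachineCommand cmd, boolean poweredOn) {
        PowerState state = poweredOn ? PowerState.PowerOn : PowerState.PowerOff;
        Integer vncPort = poweredOn ? Integer.valueOf(5901) : null; // illustrative port
        return new CheckVirtualMachineAnswer(cmd, state, vncPort);
    }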
View File

@@ -23,23 +23,10 @@ import java.util.List;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import com.cloud.vm.VirtualMachine;
public class CreateVMSnapshotCommand extends VMSnapshotBaseCommand {
public CreateVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List<VolumeObjectTO> volumeTOs, String guestOSType, VirtualMachine.State vmState) {
public CreateVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List<VolumeObjectTO> volumeTOs, String guestOSType) {
super(vmName, snapshot, volumeTOs, guestOSType);
this.vmState = vmState;
}
private VirtualMachine.State vmState;
public VirtualMachine.State getVmState() {
return vmState;
}
public void setVmState(VirtualMachine.State vmState) {
this.vmState = vmState;
}
}

View File

@@ -22,14 +22,9 @@ package com.cloud.agent.api;
import java.util.Map;
import com.cloud.host.Host;
import com.cloud.vm.VirtualMachine.State;
public class PingRoutingCommand extends PingCommand {
// TODO vmsync {
Map<String, State> newStates;
// TODO vmsync }
Map<String, HostVmStateReportEntry> _hostVmStateReport;
boolean _gatewayAccessible = true;
@@ -38,16 +33,11 @@ public class PingRoutingCommand extends PingCommand {
protected PingRoutingCommand() {
}
public PingRoutingCommand(Host.Type type, long id, Map<String, State> states, Map<String, HostVmStateReportEntry> hostVmStateReport) {
public PingRoutingCommand(Host.Type type, long id, Map<String, HostVmStateReportEntry> hostVmStateReport) {
super(type, id);
this.newStates = states;
this._hostVmStateReport = hostVmStateReport;
}
public Map<String, State> getNewStates() {
return newStates;
}
public Map<String, HostVmStateReportEntry> getHostVmStateReport() {
return this._hostVmStateReport;
}

View File

@@ -24,7 +24,6 @@ import java.util.Map;
import com.cloud.host.Host;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine.State;
public class PingRoutingWithNwGroupsCommand extends PingRoutingCommand {
HashMap<String, Pair<Long, Long>> newGroupStates;
@@ -33,9 +32,9 @@ public class PingRoutingWithNwGroupsCommand extends PingRoutingCommand {
super();
}
public PingRoutingWithNwGroupsCommand(Host.Type type, long id, Map<String, State> states, Map<String, HostVmStateReportEntry> hostVmStateReport,
public PingRoutingWithNwGroupsCommand(Host.Type type, long id, Map<String, HostVmStateReportEntry> hostVmStateReport,
HashMap<String, Pair<Long, Long>> nwGrpStates) {
super(type, id, states, hostVmStateReport);
super(type, id, hostVmStateReport);
newGroupStates = nwGrpStates;
}

View File

@@ -24,7 +24,6 @@ import java.util.Map;
import com.cloud.host.Host;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine.State;
public class PingRoutingWithOvsCommand extends PingRoutingCommand {
List<Pair<String, Long>> states;
@@ -33,9 +32,9 @@ public class PingRoutingWithOvsCommand extends PingRoutingCommand {
super();
}
public PingRoutingWithOvsCommand(Host.Type type, long id, Map<String, State> states, Map<String, HostVmStateReportEntry> hostVmStateReport,
public PingRoutingWithOvsCommand(Host.Type type, long id, Map<String, HostVmStateReportEntry> hostVmStateReport,
List<Pair<String, Long>> ovsStates) {
super(type, id, states, hostVmStateReport);
super(type, id, hostVmStateReport);
this.states = ovsStates;
}

View File

@@ -28,7 +28,7 @@ import com.cloud.vm.VirtualMachine;
public class RevertToVMSnapshotAnswer extends Answer {
private List<VolumeObjectTO> volumeTOs;
private VirtualMachine.State vmState;
private VirtualMachine.PowerState vmState;
public RevertToVMSnapshotAnswer(RevertToVMSnapshotCommand cmd, boolean result, String message) {
super(cmd, result, message);
@@ -38,13 +38,13 @@ public class RevertToVMSnapshotAnswer extends Answer {
super();
}
public RevertToVMSnapshotAnswer(RevertToVMSnapshotCommand cmd, List<VolumeObjectTO> volumeTOs, VirtualMachine.State vmState) {
public RevertToVMSnapshotAnswer(RevertToVMSnapshotCommand cmd, List<VolumeObjectTO> volumeTOs, VirtualMachine.PowerState vmState) {
super(cmd, true, "");
this.volumeTOs = volumeTOs;
this.vmState = vmState;
}
public VirtualMachine.State getVmState() {
public VirtualMachine.PowerState getVmState() {
return vmState;
}
@@ -56,7 +56,7 @@ public class RevertToVMSnapshotAnswer extends Answer {
this.volumeTOs = volumeTOs;
}
public void setVmState(VirtualMachine.State vmState) {
public void setVmState(VirtualMachine.PowerState vmState) {
this.vmState = vmState;
}

View File

@@ -25,31 +25,8 @@ import java.util.Map;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.network.Networks.RouterPrivateIpStrategy;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine.State;
public class StartupRoutingCommand extends StartupCommand {
public static class VmState {
State state;
String host;
public VmState() {
}
public VmState(State state, String host) {
this.state = state;
this.host = host;
}
public State getState() {
return state;
}
public String getHost() {
return host;
}
}
Integer cpuSockets;
int cpus;
long speed;
@@ -57,16 +34,6 @@ public class StartupRoutingCommand extends StartupCommand {
long dom0MinMemory;
boolean poolSync;
// VM power state report is added in a side-by-side way as old VM state report
// this is to allow a graceful migration from the old VM state sync model to the new model
//
// side-by-side addition of power state sync
Map<String, HostVmStateReportEntry> _hostVmStateReport;
// TODO vmsync
// deprecated, will delete after full replacement
Map<String, VmState> vms;
HashMap<String, Pair<String, State>> _clusterVMStates;
String caps;
String pool;
@@ -82,68 +49,30 @@ public class StartupRoutingCommand extends StartupCommand {
}
public StartupRoutingCommand(int cpus, long speed, long memory, long dom0MinMemory, String caps, HypervisorType hypervisorType,
RouterPrivateIpStrategy privIpStrategy, Map<String, VmState> vms, Map<String, HostVmStateReportEntry> hostVmStateReport) {
this(cpus, speed, memory, dom0MinMemory, caps, hypervisorType, vms, hostVmStateReport);
getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStrategy.toString());
}
public StartupRoutingCommand(int cpus, long speed, long memory, long dom0MinMemory, String caps, HypervisorType hypervisorType, RouterPrivateIpStrategy privIpStrategy) {
this(cpus,
speed,
memory,
dom0MinMemory,
caps,
hypervisorType,
new HashMap<String, String>(),
new HashMap<String, VmState>(),
new HashMap<String, HostVmStateReportEntry>());
getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStrategy.toString());
}
public StartupRoutingCommand(int cpus, long speed, long memory, long dom0MinMemory, final String caps, final HypervisorType hypervisorType,
final Map<String, String> hostDetails, Map<String, VmState> vms, Map<String, HostVmStateReportEntry> hostVmStateReport) {
final Map<String, String> hostDetails) {
super(Host.Type.Routing);
this.cpus = cpus;
this.speed = speed;
this.memory = memory;
this.dom0MinMemory = dom0MinMemory;
this.vms = vms;
this._hostVmStateReport = hostVmStateReport;
this.hypervisorType = hypervisorType;
this.hostDetails = hostDetails;
this.caps = caps;
this.poolSync = false;
}
public StartupRoutingCommand(int cpus2, long speed2, long memory2, long dom0MinMemory2, String caps2, HypervisorType hypervisorType2, Map<String, VmState> vms2,
Map<String, HostVmStateReportEntry> hostVmStateReport) {
this(cpus2, speed2, memory2, dom0MinMemory2, caps2, hypervisorType2, new HashMap<String, String>(), vms2, hostVmStateReport);
public StartupRoutingCommand(int cpus, long speed, long memory, long dom0MinMemory, String caps, HypervisorType hypervisorType,
RouterPrivateIpStrategy privIpStrategy) {
this(cpus, speed, memory, dom0MinMemory, caps, hypervisorType);
getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStrategy.toString());
}
public StartupRoutingCommand(int cpus, long speed, long memory, long dom0MinMemory, final String caps, final HypervisorType hypervisorType,
final Map<String, String> hostDetails, Map<String, VmState> vms, Map<String, HostVmStateReportEntry> vmPowerStates, String hypervisorVersion) {
this(cpus, speed, memory, dom0MinMemory, caps, hypervisorType, hostDetails, vms, vmPowerStates);
this.hypervisorVersion = hypervisorVersion;
public StartupRoutingCommand(int cpus2, long speed2, long memory2, long dom0MinMemory2, String caps2, HypervisorType hypervisorType2) {
this(cpus2, speed2, memory2, dom0MinMemory2, caps2, hypervisorType2, new HashMap<String, String>());
}
public void setChanges(Map<String, VmState> vms) {
this.vms = vms;
}
public void setStateChanges(Map<String, State> vms) {
for (String vm_name : vms.keySet()) {
if (this.vms == null) {
this.vms = new HashMap<String, VmState>();
}
this.vms.put(vm_name, new VmState(vms.get(vm_name), null));
}
}
public void setClusterVMStateChanges(HashMap<String, Pair<String, State>> allStates) {
_clusterVMStates = allStates;
}
public Integer getCpuSockets() {
return cpuSockets;
@@ -169,14 +98,6 @@ public class StartupRoutingCommand extends StartupCommand {
return dom0MinMemory;
}
public Map<String, VmState> getVmStates() {
return vms;
}
public HashMap<String, Pair<String, State>> getClusterVMStateChanges() {
return _clusterVMStates;
}
public void setSpeed(long speed) {
this.speed = speed;
}
@@ -241,14 +162,6 @@ public class StartupRoutingCommand extends StartupCommand {
this.hypervisorVersion = hypervisorVersion;
}
public Map<String, HostVmStateReportEntry> getHostVmStateReport() {
return this._hostVmStateReport;
}
public void setHostVmStateReport(Map<String, HostVmStateReportEntry> hostVmStateReport) {
this._hostVmStateReport = hostVmStateReport;
}
public HashMap<String, HashMap<String, VgpuTypesInfo>> getGpuGroupDetails() {
return groupDetails;
}

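The comment deleted above documented the interim side-by-side reporting; with the VmState map and the cluster-state plumbing removed, a startup now describes only host capacity and details, and power state travels exclusively in the ping report. The surviving constructors reduce call sites to, for example (argument values are the caller's, as in DummyResource above):

    StartupRoutingCommand cmd = new StartupRoutingCommand(cpus, speed, memory, dom0MinMemory,
            caps, HypervisorType.KVM, RouterPrivateIpStrategy.HostLocal);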
View File

@@ -1184,13 +1184,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (s_logger.isDebugEnabled()) {
if (cmd instanceof PingRoutingCommand) {
final PingRoutingCommand ping = (PingRoutingCommand)cmd;
if (ping.getNewStates().size() > 0) {
s_logger.debug("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
} else {
logD = false;
s_logger.debug("Ping from " + hostId);
s_logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
}
logD = false;
s_logger.debug("Ping from " + hostId);
s_logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
} else if (cmd instanceof PingCommand) {
logD = false;
s_logger.debug("Ping from " + hostId);

View File

@@ -22,15 +22,12 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.Executors;
@@ -96,7 +93,6 @@ import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StartupRoutingCommand.VmState;
import com.cloud.agent.api.StopAnswer;
import com.cloud.agent.api.StopCommand;
import com.cloud.agent.api.UnPlugNicAnswer;
@@ -206,9 +202,7 @@ import com.cloud.vm.dao.NicDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.UserVmDetailsDao;
import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.snapshot.VMSnapshot;
import com.cloud.vm.snapshot.VMSnapshotManager;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
@Local(value = VirtualMachineManager.class)
@@ -350,9 +344,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
static final ConfigKey<Integer> ClusterVMMetaDataSyncInterval = new ConfigKey<Integer>("Advanced", Integer.class, "vmmetadata.sync.interval", "180", "Cluster VM metadata sync interval in seconds",
false);
static final ConfigKey<Boolean> VmJobEnabled = new ConfigKey<Boolean>("Advanced",
Boolean.class, "vm.job.enabled", "true",
"True to enable new VM sync model. false to use the old way", false);
static final ConfigKey<Long> VmJobCheckInterval = new ConfigKey<Long>("Advanced",
Long.class, "vm.job.check.interval", "3000",
"Interval in milliseconds to check if the job is complete", false);
@@ -596,9 +587,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
_agentMgr.registerForHostEvents(this, true, true, true);
if (VmJobEnabled.value()) {
_messageBus.subscribe(VirtualMachineManager.Topics.VM_POWER_STATE, MessageDispatcher.getDispatcher(this));
}
_messageBus.subscribe(VirtualMachineManager.Topics.VM_POWER_STATE, MessageDispatcher.getDispatcher(this));
return true;
}
@@ -766,13 +755,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
orchestrateStart(vmUuid, params, planToDeploy, planner);
} finally {
@@ -1343,14 +1330,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
orchestrateStop(vmUuid, cleanUpEvenIfUnableToStop);
} finally {
@@ -1662,7 +1647,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
protected boolean checkVmOnHost(VirtualMachine vm, long hostId) throws AgentUnavailableException, OperationTimedoutException {
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer)_agentMgr.send(hostId, new CheckVirtualMachineCommand(vm.getInstanceName()));
if (!answer.getResult() || answer.getState() == State.Stopped) {
if (!answer.getResult() || answer.getState() == PowerState.PowerOff) {
return false;
}
@@ -1672,13 +1657,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
@Override
public void storageMigration(String vmUuid, StoragePool destPool) {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
orchestrateStorageMigration(vmUuid, destPool);
} finally {
@@ -1767,13 +1750,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throws ResourceUnavailableException, ConcurrentOperationException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
orchestrateMigrate(vmUuid, srcHostId, dest);
} finally {
@@ -2073,14 +2054,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throws ResourceUnavailableException, ConcurrentOperationException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
orchestrateMigrateWithStorage(vmUuid, srcHostId, destHostId, volumeToPool);
} finally {
@@ -2257,14 +2236,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
@Override
public void migrateAway(String vmUuid, long srcHostId) throws InsufficientServerCapacityException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
try {
orchestrateMigrateAway(vmUuid, srcHostId, null);
@@ -2273,8 +2250,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
orchestrateMigrateAway(vmUuid, srcHostId, _haMgr.getHAPlanner());
}
} finally {
if (VmJobEnabled.value())
_workJobDao.expunge(placeHolder.getId());
_workJobDao.expunge(placeHolder.getId());
}
} else {
Outcome<VirtualMachine> outcome = migrateVmAwayThroughJobQueue(vmUuid, srcHostId);
@@ -2421,13 +2397,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
orchestrateReboot(vmUuid, params);
} finally {
@@ -2502,92 +2476,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
public Commands fullHostSync(final long hostId, StartupRoutingCommand startup) {
Commands commands = new Commands(Command.OnError.Continue);
Map<Long, AgentVmInfo> infos = convertToInfos(startup);
final List<? extends VMInstanceVO> vms = _vmDao.listByHostId(hostId);
s_logger.debug("Found " + vms.size() + " VMs for host " + hostId);
for (VMInstanceVO vm : vms) {
AgentVmInfo info = infos.remove(vm.getId());
// sync VM Snapshots related transient states
List<VMSnapshotVO> vmSnapshotsInTransientStates =
_vmSnapshotDao.listByInstanceId(vm.getId(), VMSnapshot.State.Expunging, VMSnapshot.State.Reverting, VMSnapshot.State.Creating);
if (vmSnapshotsInTransientStates.size() > 1) {
s_logger.info("Found vm " + vm.getInstanceName() + " with VM snapshots in transient states, needs to sync VM snapshot state");
if (!_vmSnapshotMgr.syncVMSnapshot(vm, hostId)) {
s_logger.warn("Failed to sync VM in a transient snapshot related state: " + vm.getInstanceName());
continue;
} else {
s_logger.info("Successfully sync VM with transient snapshot: " + vm.getInstanceName());
}
}
if (info == null) {
info = new AgentVmInfo(vm.getInstanceName(), vm, State.Stopped);
}
HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
Command command = compareState(hostId, vm, info, true, hvGuru.trackVmHostChange());
if (command != null) {
commands.addCommand(command);
}
}
for (final AgentVmInfo left : infos.values()) {
boolean found = false;
VMInstanceVO vm = _vmDao.findVMByInstanceName(left.name);
if (vm != null) {
found = true;
HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
if (hvGuru.trackVmHostChange()) {
Command command = compareState(hostId, vm, left, true, true);
if (command != null) {
commands.addCommand(command);
}
} else {
s_logger.warn("Stopping a VM, VM " + left.name + " migrate from Host " + vm.getHostId() + " to Host " + hostId);
commands.addCommand(cleanup(left.name));
}
}
if (!found) {
s_logger.warn("Stopping a VM that we have no record of <fullHostSync>: " + left.name);
commands.addCommand(cleanup(left.name));
}
}
return commands;
}
public Commands deltaHostSync(long hostId, Map<String, State> newStates) {
Map<Long, AgentVmInfo> states = convertDeltaToInfos(newStates);
Commands commands = new Commands(Command.OnError.Continue);
for (Map.Entry<Long, AgentVmInfo> entry : states.entrySet()) {
AgentVmInfo info = entry.getValue();
VMInstanceVO vm = info.vm;
Command command = null;
if (vm != null) {
HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
command = compareState(hostId, vm, info, false, hvGuru.trackVmHostChange());
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cleaning up a VM that is no longer found: " + info.name);
}
command = cleanup(info.name);
}
if (command != null) {
commands.addCommand(command);
}
}
return commands;
}
// this is XenServer specific
public void syncVMMetaData(Map<String, String> vmMetadatum) {
@@ -2628,376 +2516,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
}
public void fullSync(final long clusterId, Map<String, Pair<String, State>> newStates) {
if (newStates == null)
return;
Map<Long, AgentVmInfo> infos = convertToInfos(newStates);
Set<VMInstanceVO> set_vms = Collections.synchronizedSet(new HashSet<VMInstanceVO>());
set_vms.addAll(_vmDao.listByClusterId(clusterId));
set_vms.addAll(_vmDao.listLHByClusterId(clusterId));
for (VMInstanceVO vm : set_vms) {
AgentVmInfo info = infos.remove(vm.getId());
// sync VM Snapshots related transient states
List<VMSnapshotVO> vmSnapshotsInExpungingStates =
_vmSnapshotDao.listByInstanceId(vm.getId(), VMSnapshot.State.Expunging, VMSnapshot.State.Creating, VMSnapshot.State.Reverting);
if (vmSnapshotsInExpungingStates.size() > 0) {
s_logger.info("Found vm " + vm.getInstanceName() + " in state. " + vm.getState() + ", needs to sync VM snapshot state");
Long hostId = null;
Host host = null;
if (info != null && info.getHostUuid() != null) {
host = _hostDao.findByGuid(info.getHostUuid());
}
hostId = host == null ? (vm.getHostId() == null ? vm.getLastHostId() : vm.getHostId()) : host.getId();
if (!_vmSnapshotMgr.syncVMSnapshot(vm, hostId)) {
s_logger.warn("Failed to sync VM with transient snapshot: " + vm.getInstanceName());
continue;
} else {
s_logger.info("Successfully sync VM with transient snapshot: " + vm.getInstanceName());
}
}
if ((info == null && (vm.getState() == State.Running || vm.getState() == State.Starting)) ||
(info != null && (info.state == State.Running && vm.getState() == State.Starting))) {
s_logger.info("Found vm " + vm.getInstanceName() + " in inconsistent state. " + vm.getState() + " on CS while " + (info == null ? "Stopped" : "Running") +
" on agent");
info = new AgentVmInfo(vm.getInstanceName(), vm, State.Stopped);
// Bug 13850- grab outstanding work item if any for this VM state so that we mark it as DONE after we change VM state, else it will remain pending
ItWorkVO work = _workDao.findByOutstandingWork(vm.getId(), vm.getState());
if (work != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found an outstanding work item for this vm " + vm + " in state:" + vm.getState() + ", work id:" + work.getId());
}
}
vm.setState(State.Running); // set it as running and let HA take care of it
_vmDao.persist(vm);
if (work != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Updating outstanding work item to Done, id:" + work.getId());
}
work.setStep(Step.Done);
_workDao.update(work.getId(), work);
}
try {
Host host = _hostDao.findByGuid(info.getHostUuid());
long hostId = host == null ? (vm.getHostId() == null ? vm.getLastHostId() : vm.getHostId()) : host.getId();
HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
Command command = compareState(hostId, vm, info, true, hvGuru.trackVmHostChange());
if (command != null) {
Answer answer = _agentMgr.send(hostId, command);
if (!answer.getResult()) {
s_logger.warn("Failed to update state of the VM due to " + answer.getDetails());
}
}
} catch (Exception e) {
s_logger.warn("Unable to update state of the VM due to exception " + e.getMessage());
e.printStackTrace();
}
} else if (info != null &&
(vm.getState() == State.Stopped || vm.getState() == State.Stopping || vm.isRemoved() || vm.getState() == State.Destroyed || vm.getState() == State.Expunging)) {
Host host = _hostDao.findByGuid(info.getHostUuid());
if (host != null) {
s_logger.warn("Stopping a VM which is stopped/stopping/destroyed/expunging " + info.name);
if (vm.getState() == State.Stopped || vm.getState() == State.Stopping) {
vm.setState(State.Stopped); // set it as stop and clear it from host
vm.setHostId(null);
_vmDao.persist(vm);
}
try {
Answer answer = _agentMgr.send(host.getId(), cleanup(info.name));
if (!answer.getResult()) {
s_logger.warn("Unable to stop a VM due to " + answer.getDetails());
}
} catch (Exception e) {
s_logger.warn("Unable to stop a VM due to " + e.getMessage());
}
}
} else
// host id can change
if (info != null && vm.getState() == State.Running) {
// check for host id changes
Host host = _hostDao.findByGuid(info.getHostUuid());
if (host != null && (vm.getHostId() == null || host.getId() != vm.getHostId())) {
s_logger.info("Found vm " + vm.getInstanceName() + " with inconsistent host in db, new host is " + host.getId());
try {
stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, host.getId());
} catch (NoTransitionException e) {
s_logger.warn(e.getMessage());
}
}
}
/* else if(info == null && vm.getState() == State.Stopping) { //Handling CS-13376
s_logger.warn("Marking the VM as Stopped as it was still stopping on the CS" +vm.getName());
vm.setState(State.Stopped); // Setting the VM as stopped on the DB and clearing it from the host
vm.setLastHostId(vm.getHostId());
vm.setHostId(null);
_vmDao.persist(vm);
}*/
}
for (final AgentVmInfo left : infos.values()) {
if (!VirtualMachineName.isValidVmName(left.name))
continue; // if the vm doesn't follow CS naming ignore it for stopping
try {
Host host = _hostDao.findByGuid(left.getHostUuid());
if (host != null) {
s_logger.warn("Stopping a VM which we do not have any record of " + left.name);
Answer answer = _agentMgr.send(host.getId(), cleanup(left.name));
if (!answer.getResult()) {
s_logger.warn("Unable to stop a VM due to " + answer.getDetails());
}
}
} catch (Exception e) {
s_logger.warn("Unable to stop a VM due to " + e.getMessage());
}
}
}
protected Map<Long, AgentVmInfo> convertToInfos(final Map<String, Pair<String, State>> newStates) {
final HashMap<Long, AgentVmInfo> map = new HashMap<Long, AgentVmInfo>();
if (newStates == null) {
return map;
}
boolean is_alien_vm = true;
long alien_vm_count = -1;
for (Map.Entry<String, Pair<String, State>> entry : newStates.entrySet()) {
is_alien_vm = true;
String name = entry.getKey();
VMInstanceVO vm = _vmDao.findVMByInstanceName(name);
if (vm != null) {
map.put(vm.getId(), new AgentVmInfo(entry.getKey(), vm, entry.getValue().second(), entry.getValue().first()));
is_alien_vm = false;
}
// alien VMs
if (is_alien_vm) {
map.put(alien_vm_count--, new AgentVmInfo(entry.getKey(), null, entry.getValue().second(), entry.getValue().first()));
s_logger.warn("Found an alien VM " + entry.getKey());
}
}
return map;
}
protected Map<Long, AgentVmInfo> convertToInfos(StartupRoutingCommand cmd) {
final Map<String, VmState> states = cmd.getVmStates();
final HashMap<Long, AgentVmInfo> map = new HashMap<Long, AgentVmInfo>();
if (states == null) {
return map;
}
for (Map.Entry<String, VmState> entry : states.entrySet()) {
String name = entry.getKey();
VMInstanceVO vm = _vmDao.findVMByInstanceName(name);
if (vm != null) {
map.put(vm.getId(), new AgentVmInfo(entry.getKey(), vm, entry.getValue().getState(), entry.getValue().getHost()));
}
}
return map;
}
protected Map<Long, AgentVmInfo> convertDeltaToInfos(final Map<String, State> states) {
final HashMap<Long, AgentVmInfo> map = new HashMap<Long, AgentVmInfo>();
if (states == null) {
return map;
}
for (Map.Entry<String, State> entry : states.entrySet()) {
String name = entry.getKey();
VMInstanceVO vm = _vmDao.findVMByInstanceName(name);
if (vm != null) {
map.put(vm.getId(), new AgentVmInfo(entry.getKey(), vm, entry.getValue()));
}
}
return map;
}
/**
* compareState does as its name suggests and compares the states between
* management server and agent. It returns whether something should be
* cleaned up
*
*/
protected Command compareState(long hostId, VMInstanceVO vm, final AgentVmInfo info, final boolean fullSync, boolean trackExternalChange) {
State agentState = info.state;
final State serverState = vm.getState();
final String serverName = vm.getInstanceName();
Command command = null;
s_logger.debug("VM " + serverName + ": cs state = " + serverState + " and realState = " + agentState);
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM " + serverName + ": cs state = " + serverState + " and realState = " + agentState);
}
if (agentState == State.Error) {
agentState = State.Stopped;
AlertManager.AlertType alertType = AlertManager.AlertType.ALERT_TYPE_USERVM;
if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) {
alertType = AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER;
} else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) {
alertType = AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY;
} else if (VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType())) {
alertType = AlertManager.AlertType.ALERT_TYPE_SSVM;
}
HostPodVO podVO = _podDao.findById(vm.getPodIdToDeployIn());
DataCenterVO dcVO = _dcDao.findById(vm.getDataCenterId());
HostVO hostVO = _hostDao.findById(vm.getHostId());
String hostDesc = "name: " + hostVO.getName() + " (id:" + hostVO.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
_alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "VM (name: " + vm.getInstanceName() + ", id: " + vm.getId() +
") stopped on host " + hostDesc + " due to storage failure", "Virtual Machine " + vm.getInstanceName() + " (id: " + vm.getId() + ") running on host [" +
vm.getHostId() + "] stopped due to storage failure.");
}
if (trackExternalChange) {
if (serverState == State.Starting) {
if (vm.getHostId() != null && vm.getHostId() != hostId) {
s_logger.info("CloudStack is starting VM on host " + vm.getHostId() + ", but status report comes from a different host " + hostId +
", skip status sync for vm: " + vm.getInstanceName());
return null;
}
}
if (vm.getHostId() == null || hostId != vm.getHostId()) {
try {
ItWorkVO workItem = _workDao.findByOutstandingWork(vm.getId(), State.Migrating);
if (workItem == null) {
stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, hostId);
}
} catch (NoTransitionException e) {
}
}
}
// during VM migration, don't sync state with agent status updates
if (serverState == State.Migrating) {
s_logger.debug("Skipping vm in migrating state: " + vm);
return null;
}
if (trackExternalChange) {
if (serverState == State.Starting) {
if (vm.getHostId() != null && vm.getHostId() != hostId) {
s_logger.info("CloudStack is starting VM on host " + vm.getHostId() + ", but status report comes from a different host " + hostId +
", skip status sync for vm: " + vm.getInstanceName());
return null;
}
}
if (serverState == State.Running) {
try {
//
// we had a bug that sometimes VM may be at Running State
// but host_id is null, we will cover it here.
// means that when CloudStack DB lost of host information,
// we will heal it with the info reported from host
//
if (vm.getHostId() == null || hostId != vm.getHostId()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("detected host change when VM " + vm + " is at running state, VM could be live-migrated externally from host " +
vm.getHostId() + " to host " + hostId);
}
stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, hostId);
}
} catch (NoTransitionException e) {
s_logger.warn(e.getMessage());
}
}
}
if (agentState == serverState) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Both states are " + agentState + " for " + vm);
}
assert (agentState == State.Stopped || agentState == State.Running) : "If the states we send up is changed, this must be changed.";
if (agentState == State.Running) {
try {
stateTransitTo(vm, VirtualMachine.Event.AgentReportRunning, hostId);
} catch (NoTransitionException e) {
s_logger.warn(e.getMessage());
}
// FIXME: What if someone comes in and sets it to stopping? Then
// what?
return null;
}
s_logger.debug("State matches but the agent said stopped so let's send a cleanup command anyways.");
return cleanup(vm);
}
if (agentState == State.Shutdowned) {
if (serverState == State.Running || serverState == State.Starting || serverState == State.Stopping) {
try {
advanceStop(vm.getUuid(), true);
} catch (AgentUnavailableException e) {
assert (false) : "How do we hit this with forced on?";
return null;
} catch (OperationTimedoutException e) {
assert (false) : "How do we hit this with forced on?";
return null;
} catch (ConcurrentOperationException e) {
assert (false) : "How do we hit this with forced on?";
return null;
}
} else {
s_logger.debug("Sending cleanup to a shutdowned vm: " + vm.getInstanceName());
command = cleanup(vm);
}
} else if (agentState == State.Stopped) {
// This state means the VM on the agent was detected previously
// and now is gone. This is slightly different than if the VM
// was never completed but we still send down a Stop Command
// to ensure there's cleanup.
if (serverState == State.Running) {
// Our records showed that it should be running so let's restart
// it.
_haMgr.scheduleRestart(vm, false);
} else if (serverState == State.Stopping) {
_haMgr.scheduleStop(vm, hostId, WorkType.ForceStop);
s_logger.debug("Scheduling a check stop for VM in stopping mode: " + vm);
} else if (serverState == State.Starting) {
s_logger.debug("Ignoring VM in starting mode: " + vm.getInstanceName());
_haMgr.scheduleRestart(vm, false);
}
command = cleanup(vm);
} else if (agentState == State.Running) {
if (serverState == State.Starting) {
if (fullSync) {
try {
ensureVmRunningContext(hostId, vm, Event.AgentReportRunning);
} catch (OperationTimedoutException e) {
s_logger.error("Exception during update for running vm: " + vm, e);
return null;
} catch (ResourceUnavailableException e) {
s_logger.error("Exception during update for running vm: " + vm, e);
return null;
} catch (InsufficientAddressCapacityException e) {
s_logger.error("Exception during update for running vm: " + vm, e);
return null;
} catch (NoTransitionException e) {
s_logger.warn(e.getMessage());
}
}
} else if (serverState == State.Stopped) {
s_logger.debug("Scheduling a stop command for " + vm);
_haMgr.scheduleStop(vm, hostId, WorkType.Stop);
} else {
s_logger.debug("server VM state " + serverState + " does not meet expectation of a running VM report from agent");
// just be careful not to stop VM for things we don't handle
// command = cleanup(vm);
}
}
return command;
}
private void ensureVmRunningContext(long hostId, VMInstanceVO vm, Event cause) throws OperationTimedoutException, ResourceUnavailableException,
NoTransitionException, InsufficientAddressCapacityException {
@@ -3095,22 +2613,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
for (Command cmd : cmds) {
if (cmd instanceof PingRoutingCommand) {
PingRoutingCommand ping = (PingRoutingCommand)cmd;
if (ping.getNewStates() != null && ping.getNewStates().size() > 0) {
if (!VmJobEnabled.value()) {
Commands commands = deltaHostSync(agentId, ping.getNewStates());
if (commands.size() > 0) {
try {
_agentMgr.send(agentId, commands, this);
} catch (final AgentUnavailableException e) {
s_logger.warn("Agent is now unavailable", e);
}
}
}
}
if(VmJobEnabled.value()) {
if (ping.getHostVmStateReport() != null) {
_syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport());
}
if (ping.getHostVmStateReport() != null) {
_syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport());
}
// take the chance to scan VMs that are stuck in transitional states
@@ -3141,9 +2645,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
if(s_logger.isDebugEnabled())
s_logger.debug("Received startup command from hypervisor host. host id: " + agent.getId());
if(VmJobEnabled.value()) {
_syncMgr.resetHostSyncState(agent.getId());
}
_syncMgr.resetHostSyncState(agent.getId());
if (forRebalance) {
s_logger.debug("Not processing listener " + this + " as connect happens on rebalance process");
@@ -3159,13 +2661,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
long agentId = agent.getId();
if (agent.getHypervisorType() == HypervisorType.XenServer) { // only for Xen
if (!VmJobEnabled.value()) {
StartupRoutingCommand startup = (StartupRoutingCommand)cmd;
HashMap<String, Pair<String, State>> allStates = startup.getClusterVMStateChanges();
if (allStates != null) {
fullSync(clusterId, allStates);
}
}
// initiate the cron job
ClusterVMMetaDataSyncCommand syncVMMetaDataCmd = new ClusterVMMetaDataSyncCommand(ClusterVMMetaDataSyncInterval.value(), clusterId);
try {
@@ -3174,35 +2669,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
} catch (AgentUnavailableException e) {
s_logger.fatal("The Cluster VM metadata sync process failed for cluster id " + clusterId + " with ", e);
}
} else { // for others KVM and VMWare
if (!VmJobEnabled.value()) {
StartupRoutingCommand startup = (StartupRoutingCommand)cmd;
Commands commands = fullHostSync(agentId, startup);
if (commands.size() > 0) {
s_logger.debug("Sending clean commands to the agent");
try {
boolean error = false;
Answer[] answers = _agentMgr.send(agentId, commands);
for (Answer answer : answers) {
if (!answer.getResult()) {
s_logger.warn("Unable to stop a VM due to " + answer.getDetails());
error = true;
}
}
if (error) {
throw new ConnectionException(true, "Unable to stop VMs");
}
} catch (final AgentUnavailableException e) {
s_logger.warn("Agent is unavailable now", e);
throw new ConnectionException(true, "Unable to sync", e);
} catch (final OperationTimedoutException e) {
s_logger.warn("Agent is unavailable now", e);
throw new ConnectionException(true, "Unable to sync", e);
}
}
}
}
}
@@ -3239,28 +2705,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
}
protected class AgentVmInfo {
public String name;
public State state;
public String hostUuid;
public VMInstanceVO vm;
public AgentVmInfo(String name, VMInstanceVO vm, State state, String host) {
this.name = name;
this.state = state;
this.vm = vm;
hostUuid = host;
}
public AgentVmInfo(String name, VMInstanceVO vm, State state) {
this(name, vm, state, null);
}
public String getHostUuid() {
return hostUuid;
}
}
@Override
public VMInstanceVO findById(long vmId) {
return _vmDao.findById(vmId);
@@ -3345,12 +2789,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vm.getId());
}
placeHolder = createPlaceHolderWork(vm.getId());
try {
return orchestrateAddVmToNetwork(vm, network, requested);
} finally {
@@ -3460,12 +2902,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throws ConcurrentOperationException, ResourceUnavailableException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vm.getId());
}
placeHolder = createPlaceHolderWork(vm.getId());
try {
return orchestrateRemoveNicFromVm(vm, nic);
} finally {
@@ -3712,13 +3152,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
public void migrateForScale(String vmUuid, long srcHostId, DeployDestination dest, Long oldSvcOfferingId)
throws ResourceUnavailableException, ConcurrentOperationException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
orchestrateMigrateForScale(vmUuid, srcHostId, dest, oldSvcOfferingId);
} finally {
@@ -3977,13 +3415,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throws ResourceUnavailableException, InsufficientServerCapacityException, ConcurrentOperationException {
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
}
VirtualMachine vm = _vmDao.findByUuid(vmUuid);
placeHolder = createPlaceHolderWork(vm.getId());
try {
return orchestrateReConfigureVm(vmUuid, oldServiceOffering, reconfiguringOnExistingHost);
} finally {

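Each orchestration entry point above (start, stop, migrate, reboot, storage migration, NIC operations, scale, reconfigure) now runs one unconditional sequence where it previously branched on vm.job.enabled. Schematically, with names taken from this diff and the orchestrate call standing in for whichever operation applies:

    VirtualMachine vm = _vmDao.findByUuid(vmUuid);
    VmWorkJobVO placeHolder = createPlaceHolderWork(vm.getId());
    try {
        orchestrateStart(vmUuid, params, planToDeploy, planner); // or orchestrateStop(...), etc.
    } finally {
        _workJobDao.expunge(placeHolder.getId());
    }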
View File

@@ -102,6 +102,7 @@ import com.cloud.utils.db.EntityManager;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine.Event;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.UserVmDetailsDao;
import com.cloud.vm.dao.VMInstanceDao;
@@ -397,7 +398,7 @@ public class VirtualMachineManagerImplTest {
CheckVirtualMachineAnswer checkVmAnswerMock = mock(CheckVirtualMachineAnswer.class);
when(checkVmAnswerMock.getResult()).thenReturn(true);
when(checkVmAnswerMock.getState()).thenReturn(State.Running);
when(checkVmAnswerMock.getState()).thenReturn(PowerState.PowerOn);
when(_agentMgr.send(anyLong(), isA(CheckVirtualMachineCommand.class))).thenReturn(checkVmAnswerMock);
// Mock the state transitions of vm.

View File

@@ -137,7 +137,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
HostVO host = hostDao.findById(hostId);
GuestOSHypervisorVO guestOsMapping = guestOsHypervisorDao.findByOsIdAndHypervisor(guestOS.getId(), host.getHypervisorType().toString(), host.getHypervisorVersion());
CreateVMSnapshotCommand ccmd = new CreateVMSnapshotCommand(userVm.getInstanceName(), target, volumeTOs, guestOS.getDisplayName(), userVm.getState());
CreateVMSnapshotCommand ccmd = new CreateVMSnapshotCommand(userVm.getInstanceName(), target, volumeTOs, guestOS.getDisplayName());
if (guestOsMapping == null) {
ccmd.setPlatformEmulator(null);
} else {

View File

@@ -124,7 +124,7 @@ public class HypervisorHelperImpl implements HypervisorHelper {
GuestOSVO guestOS = guestOSDao.findById(virtualMachine.getGuestOSId());
List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(virtualMachine.getId());
CreateVMSnapshotCommand ccmd =
new CreateVMSnapshotCommand(virtualMachine.getInstanceName(), vmSnapshotTO, volumeTOs, guestOS.getDisplayName(), virtualMachine.getState());
new CreateVMSnapshotCommand(virtualMachine.getInstanceName(), vmSnapshotTO, volumeTOs, guestOS.getDisplayName());
HostVO host = hostDao.findById(hostId);
GuestOSHypervisorVO guestOsMapping = guestOsHypervisorDao.findByOsIdAndHypervisor(guestOS.getId(), host.getHypervisorType().toString(), host.getHypervisorVersion());
ccmd.setPlatformEmulator(guestOsMapping.getGuestOsName());

View File

@@ -67,9 +67,7 @@ import com.cloud.utils.script.Script;
import com.cloud.utils.script.Script2;
import com.cloud.utils.script.Script2.ParamType;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
@@ -85,7 +83,6 @@ import java.util.concurrent.TimeUnit;
@Local(value = ServerResource.class)
public class BareMetalResourceBase extends ManagerBase implements ServerResource {
private static final Logger s_logger = Logger.getLogger(BareMetalResourceBase.class);
protected HashMap<String, State> _vms = new HashMap<String, State>(2);
protected String _name;
protected String _uuid;
protected String _zone;
@@ -116,17 +113,6 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
protected ConfigurationDao configDao;
protected VMInstanceDao vmDao;
private void changeVmState(String vmName, VirtualMachine.State state) {
synchronized (_vms) {
_vms.put(vmName, state);
}
}
private State removeVmState(String vmName) {
synchronized (_vms) {
return _vms.remove(vmName);
}
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
@@ -341,37 +327,6 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
return com.cloud.host.Host.Type.Routing;
}
protected State getVmState() {
OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser();
if (!doScript(_getStatusCommand, interpreter)) {
s_logger.warn("Cannot get power status of " + _name + ", assume VM state was not changed");
return null;
}
if (isPowerOn(interpreter.getLines())) {
return State.Running;
} else {
return State.Stopped;
}
}
protected Map<String, State> fullSync() {
Map<String, State> states = new HashMap<String, State>();
if (hostId != null) {
final List<? extends VMInstanceVO> vms = vmDao.listByHostId(hostId);
for (VMInstanceVO vm : vms) {
states.put(vm.getInstanceName(), vm.getState());
}
}
/*
* Map<String, State> changes = new HashMap<String, State>();
*
* if (_vmName != null) { State state = getVmState(); if (state != null)
* { changes.put(_vmName, state); } }
*/
return states;
}
protected Map<String, HostVmStateReportEntry> getHostVmStateReport() {
Map<String, HostVmStateReportEntry> states = new HashMap<String, HostVmStateReportEntry>();
if (hostId != null) {
@@ -380,25 +335,18 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
states.put(
vm.getInstanceName(),
new HostVmStateReportEntry(
vm.getState() == State.Running ? PowerState.PowerOn : PowerState.PowerOff, "host-" + hostId
vm.getPowerState(), "host-" + hostId
)
);
}
}
/*
* Map<String, State> changes = new HashMap<String, State>();
*
* if (_vmName != null) { State state = getVmState(); if (state != null)
* { changes.put(_vmName, state); } }
*/
return states;
}
@Override
public StartupCommand[] initialize() {
StartupRoutingCommand cmd = new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.BareMetal,
new HashMap<String, String>(), null, null);
new HashMap<String, String>());
cmd.setDataCenter(_zone);
cmd.setPod(_pod);
@@ -413,7 +361,6 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
cmd.setMemory(_memCapacity);
cmd.setPrivateMacAddress(_mac);
cmd.setPublicMacAddress(_mac);
cmd.setStateChanges(fullSync());
return new StartupCommand[] { cmd };
}
@@ -439,15 +386,15 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
if (hostId != null) {
final List<? extends VMInstanceVO> vms = vmDao.listByHostId(hostId);
if (vms.isEmpty()) {
return new PingRoutingCommand(getType(), id, deltaSync(), getHostVmStateReport());
return new PingRoutingCommand(getType(), id, getHostVmStateReport());
} else {
VMInstanceVO vm = vms.get(0);
SecurityGroupHttpClient client = new SecurityGroupHttpClient();
HashMap<String, Pair<Long, Long>> nwGrpStates = client.sync(vm.getInstanceName(), vm.getId(), vm.getPrivateIpAddress());
return new PingRoutingWithNwGroupsCommand(getType(), id, null, getHostVmStateReport(), nwGrpStates);
return new PingRoutingWithNwGroupsCommand(getType(), id, getHostVmStateReport(), nwGrpStates);
}
} else {
return new PingRoutingCommand(getType(), id, deltaSync(), getHostVmStateReport());
return new PingRoutingCommand(getType(), id, getHostVmStateReport());
}
}
@@ -487,7 +434,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
}
protected CheckVirtualMachineAnswer execute(final CheckVirtualMachineCommand cmd) {
return new CheckVirtualMachineAnswer(cmd, State.Stopped, null);
return new CheckVirtualMachineAnswer(cmd, PowerState.PowerOff, null);
}
protected Answer execute(IpmiBootorResetCommand cmd) {
@@ -608,85 +555,33 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
protected StartAnswer execute(StartCommand cmd) {
VirtualMachineTO vm = cmd.getVirtualMachine();
State state = State.Stopped;
try {
changeVmState(vm.getName(), State.Starting);
OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser();
if (!doScript(_getStatusCommand, interpreter)) {
return new StartAnswer(cmd, "Cannot get current power status of " + _name);
}
OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser();
if (!doScript(_getStatusCommand, interpreter)) {
return new StartAnswer(cmd, "Cannot get current power status of " + _name);
if (isPowerOn(interpreter.getLines())) {
if (!doScript(_rebootCommand)) {
return new StartAnswer(cmd, "IPMI reboot failed");
}
if (isPowerOn(interpreter.getLines())) {
if (!doScript(_rebootCommand)) {
return new StartAnswer(cmd, "IPMI reboot failed");
}
} else {
if (!doScript(_powerOnCommand)) {
return new StartAnswer(cmd, "IPMI power on failed");
}
}
if (_isEchoScAgent) {
SecurityGroupHttpClient hc = new SecurityGroupHttpClient();
boolean echoRet = hc.echo(vm.getNics()[0].getIp(), TimeUnit.MINUTES.toMillis(30), TimeUnit.MINUTES.toMillis(1));
if (!echoRet) {
return new StartAnswer(cmd, String.format("Call security group agent on vm[%s] timeout", vm.getNics()[0].getIp()));
}
}
s_logger.debug("Start bare metal vm " + vm.getName() + "successfully");
state = State.Running;
_vmName = vm.getName();
return new StartAnswer(cmd);
} finally {
if (state != State.Stopped) {
changeVmState(vm.getName(), state);
} else {
removeVmState(vm.getName());
} else {
if (!doScript(_powerOnCommand)) {
return new StartAnswer(cmd, "IPMI power on failed");
}
}
}
protected HashMap<String, State> deltaSync() {
final HashMap<String, State> changes = new HashMap<String, State>();
/*
* Disable sync until we find a way that only tracks status but not does
* action
*
* The scenario is: Baremetal will reboot host when creating template.
* Given most servers take a long time to boot up, there would be a
* period that mgmt server finds the host is stopped through fullsync.
* Then mgmt server updates database with marking the host as stopped,
* after that, the host comes up and full sync then indicates it's
* running. Because in database the host is already stopped, mgmt server
* sends out a stop command. As a result, creating image gets never
* happened.
*
* if (_vmName == null) { return null; }
*
* State newState = getVmState(); if (newState == null) {
* s_logger.warn("Cannot get power state of VM " + _vmName); return
* null; }
*
* final State oldState = removeVmState(_vmName); if (oldState == null)
* { changeVmState(_vmName, newState); changes.put(_vmName, newState); }
* else if (oldState == State.Starting) { if (newState == State.Running)
* { changeVmState(_vmName, newState); } else if (newState ==
* State.Stopped) { s_logger.debug("Ignoring vm " + _vmName +
* " because of a lag in starting the vm."); } } else if (oldState ==
* State.Migrating) {
* s_logger.warn("How can baremetal VM get into migrating state???"); }
* else if (oldState == State.Stopping) { if (newState == State.Stopped)
* { changeVmState(_vmName, newState); } else if (newState ==
* State.Running) { s_logger.debug("Ignoring vm " + _vmName +
* " because of a lag in stopping the vm. "); } } else if (oldState !=
* newState) { changeVmState(_vmName, newState); changes.put(_vmName,
* newState); }
*/
return changes;
if (_isEchoScAgent) {
SecurityGroupHttpClient hc = new SecurityGroupHttpClient();
boolean echoRet = hc.echo(vm.getNics()[0].getIp(), TimeUnit.MINUTES.toMillis(30), TimeUnit.MINUTES.toMillis(1));
if (!echoRet) {
return new StartAnswer(cmd, String.format("Call security group agent on vm[%s] timeout", vm.getNics()[0].getIp()));
}
}
s_logger.debug("Start bare metal vm " + vm.getName() + "successfully");
_vmName = vm.getName();
return new StartAnswer(cmd);
}
protected ReadyAnswer execute(ReadyCommand cmd) {

View File

@@ -42,7 +42,6 @@ import com.cloud.agent.api.StartupExternalDhcpCommand;
import com.cloud.host.Host.Type;
import com.cloud.resource.ServerResource;
import com.cloud.utils.component.ManagerBase;
import com.cloud.vm.VirtualMachine.State;
public class BaremetalDhcpResourceBase extends ManagerBase implements ServerResource {
private static final Logger s_logger = Logger.getLogger(BaremetalDhcpResourceBase.class);
@ -126,7 +125,7 @@ public class BaremetalDhcpResourceBase extends ManagerBase implements ServerReso
@Override
public PingCommand getCurrentStatus(long id) {
//TODO: check server
return new PingRoutingCommand(getType(), id, new HashMap<String, State>(), new HashMap<String, HostVmStateReportEntry>());
return new PingRoutingCommand(getType(), id, new HashMap<String, HostVmStateReportEntry>());
}
protected ReadyAnswer execute(ReadyCommand cmd) {

View File

@ -39,7 +39,6 @@ import com.cloud.agent.api.PingRoutingCommand;
import com.cloud.agent.api.routing.DhcpEntryCommand;
import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SSHCmdHelper;
import com.cloud.vm.VirtualMachine.State;
public class BaremetalDhcpdResource extends BaremetalDhcpResourceBase {
private static final Logger s_logger = Logger.getLogger(BaremetalDhcpdResource.class);
@ -107,7 +106,7 @@ public class BaremetalDhcpdResource extends BaremetalDhcpResourceBase {
return null;
} else {
SSHCmdHelper.releaseSshConnection(sshConnection);
return new PingRoutingCommand(getType(), id, new HashMap<String, State>(), new HashMap<String, HostVmStateReportEntry>());
return new PingRoutingCommand(getType(), id, new HashMap<String, HostVmStateReportEntry>());
}
}

View File

@ -39,7 +39,6 @@ import com.cloud.agent.api.PingRoutingCommand;
import com.cloud.agent.api.routing.DhcpEntryCommand;
import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SSHCmdHelper;
import com.cloud.vm.VirtualMachine.State;
public class BaremetalDnsmasqResource extends BaremetalDhcpResourceBase {
private static final Logger s_logger = Logger.getLogger(BaremetalDnsmasqResource.class);
@ -99,7 +98,7 @@ public class BaremetalDnsmasqResource extends BaremetalDhcpResourceBase {
return null;
} else {
SSHCmdHelper.releaseSshConnection(sshConnection);
return new PingRoutingCommand(getType(), id, new HashMap<String, State>(), new HashMap<String, HostVmStateReportEntry>());
return new PingRoutingCommand(getType(), id, new HashMap<String, HostVmStateReportEntry>());
}
}

View File

@ -38,7 +38,6 @@ import com.cloud.agent.api.routing.VmDataCommand;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SSHCmdHelper;
import com.cloud.vm.VirtualMachine.State;
public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase {
private static final Logger s_logger = Logger.getLogger(BaremetalKickStartPxeResource.class);
@ -107,7 +106,7 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase {
return null;
} else {
SSHCmdHelper.releaseSshConnection(sshConnection);
return new PingRoutingCommand(getType(), id, new HashMap<String, State>(), new HashMap<String, HostVmStateReportEntry>());
return new PingRoutingCommand(getType(), id, new HashMap<String, HostVmStateReportEntry>());
}
}

View File

@ -43,7 +43,6 @@ import com.cloud.agent.api.baremetal.PrepareCreateTemplateCommand;
import com.cloud.agent.api.routing.VmDataCommand;
import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SSHCmdHelper;
import com.cloud.vm.VirtualMachine.State;
public class BaremetalPingPxeResource extends BaremetalPxeResourceBase {
private static final Logger s_logger = Logger.getLogger(BaremetalPingPxeResource.class);
@ -143,7 +142,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase {
return null;
} else {
SSHCmdHelper.releaseSshConnection(sshConnection);
return new PingRoutingCommand(getType(), id, new HashMap<String, State>(), new HashMap<String, HostVmStateReportEntry>());
return new PingRoutingCommand(getType(), id, new HashMap<String, HostVmStateReportEntry>());
}
}

View File

@ -90,7 +90,6 @@ import com.cloud.agent.api.SetupGuestNetworkCommand;
import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StartupRoutingCommand.VmState;
import com.cloud.agent.api.StartupStorageCommand;
import com.cloud.agent.api.UnPlugNicAnswer;
import com.cloud.agent.api.UnPlugNicCommand;
@ -211,8 +210,7 @@ public class HypervDirectConnectResource extends ServerResourceBase implements S
// Create default StartupRoutingCommand, then customise
StartupRoutingCommand defaultStartRoutCmd =
new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.Hyperv, RouterPrivateIpStrategy.HostLocal, new HashMap<String, VmState>(),
new HashMap<String, HostVmStateReportEntry>());
new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.Hyperv, RouterPrivateIpStrategy.HostLocal);
// Identity within the data centre is decided by CloudStack kernel,
// and passed via ServerResource.configure()
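(Aside: this hand-off uses the standard ServerResource.configure(String, Map) contract; a minimal sketch, with "guid" as an assumed params key and _guid as a hypothetical field:)
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
    // The CloudStack kernel injects identity values into params; "guid" is an assumed key here.
    _guid = (String)params.get("guid");
    return true;
}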
@ -224,7 +222,6 @@ public class HypervDirectConnectResource extends ServerResourceBase implements S
defaultStartRoutCmd.setPrivateIpAddress(_agentIp);
defaultStartRoutCmd.setStorageIpAddress(_agentIp);
defaultStartRoutCmd.setPool(_clusterGuid);
defaultStartRoutCmd.setHostVmStateReport(getHostVmStateReport());
s_logger.debug("Generated StartupRoutingCommand for _agentIp \"" + _agentIp + "\"");
@ -316,7 +313,7 @@ public class HypervDirectConnectResource extends ServerResourceBase implements S
@Override
public final PingCommand getCurrentStatus(final long id) {
PingCommand pingCmd = new PingRoutingCommand(getType(), id, null, getHostVmStateReport());
PingCommand pingCmd = new PingRoutingCommand(getType(), id, getHostVmStateReport());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Ping host " + _name + " (IP " + _agentIp + ")");

View File

@ -63,7 +63,6 @@ import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StartupRoutingCommand.VmState;
import com.cloud.agent.api.StartupStorageCommand;
import com.cloud.agent.api.StopAnswer;
import com.cloud.agent.api.StopCommand;
@ -252,8 +251,7 @@ public class HypervDirectConnectResourceTest {
public final void testStartupCommand() {
StartupRoutingCommand defaultStartRoutCmd =
new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.Hyperv, RouterPrivateIpStrategy.HostLocal, new HashMap<String, VmState>(),
new HashMap<String, HostVmStateReportEntry>());
new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.Hyperv, RouterPrivateIpStrategy.HostLocal);
// Identity within the data centre is decided by CloudStack kernel,
// and passed via ServerResource.configure()

View File

@ -68,6 +68,7 @@ import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainBlockStats;
import org.libvirt.DomainInfo;
import org.libvirt.DomainInfo.DomainState;
import org.libvirt.DomainInterfaceStats;
import org.libvirt.DomainSnapshot;
import org.libvirt.LibvirtException;
@ -268,7 +269,6 @@ import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.VirtualMachine.State;
/**
* LibvirtComputingResource execute requests on the computing/routing host using
@ -461,31 +461,17 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
protected int _cmdsTimeout;
protected int _stopTimeout;
// TODO vmsync {
protected static final HashMap<DomainInfo.DomainState, State> s_statesTable;
protected static final HashMap<DomainState, PowerState> s_powerStatesTable;
static {
s_statesTable = new HashMap<DomainInfo.DomainState, State>();
s_statesTable.put(DomainInfo.DomainState.VIR_DOMAIN_SHUTOFF, State.Stopped);
s_statesTable.put(DomainInfo.DomainState.VIR_DOMAIN_PAUSED, State.Running);
s_statesTable.put(DomainInfo.DomainState.VIR_DOMAIN_RUNNING, State.Running);
s_statesTable.put(DomainInfo.DomainState.VIR_DOMAIN_BLOCKED, State.Running);
s_statesTable.put(DomainInfo.DomainState.VIR_DOMAIN_NOSTATE, State.Unknown);
s_statesTable.put(DomainInfo.DomainState.VIR_DOMAIN_SHUTDOWN, State.Stopping);
}
// TODO vmsync }
protected static final HashMap<DomainInfo.DomainState, PowerState> s_powerStatesTable;
static {
s_powerStatesTable = new HashMap<DomainInfo.DomainState, PowerState>();
s_powerStatesTable.put(DomainInfo.DomainState.VIR_DOMAIN_SHUTOFF, PowerState.PowerOff);
s_powerStatesTable.put(DomainInfo.DomainState.VIR_DOMAIN_PAUSED, PowerState.PowerOn);
s_powerStatesTable.put(DomainInfo.DomainState.VIR_DOMAIN_RUNNING, PowerState.PowerOn);
s_powerStatesTable.put(DomainInfo.DomainState.VIR_DOMAIN_BLOCKED, PowerState.PowerOn);
s_powerStatesTable.put(DomainInfo.DomainState.VIR_DOMAIN_NOSTATE, PowerState.PowerUnknown);
s_powerStatesTable.put(DomainInfo.DomainState.VIR_DOMAIN_SHUTDOWN, PowerState.PowerOff);
s_powerStatesTable = new HashMap<DomainState, PowerState>();
s_powerStatesTable.put(DomainState.VIR_DOMAIN_SHUTOFF, PowerState.PowerOff);
s_powerStatesTable.put(DomainState.VIR_DOMAIN_PAUSED, PowerState.PowerOn);
s_powerStatesTable.put(DomainState.VIR_DOMAIN_RUNNING, PowerState.PowerOn);
s_powerStatesTable.put(DomainState.VIR_DOMAIN_BLOCKED, PowerState.PowerOn);
s_powerStatesTable.put(DomainState.VIR_DOMAIN_NOSTATE, PowerState.PowerUnknown);
s_powerStatesTable.put(DomainState.VIR_DOMAIN_SHUTDOWN, PowerState.PowerOff);
}
protected HashMap<String, State> _vms = new HashMap<String, State>(20);
protected List<String> _vmsKilled = new ArrayList<String>();
private VirtualRoutingResource _virtRouterResource;
@ -2335,7 +2321,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
String vmName = cmd.getVmName();
try {
Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
DomainInfo.DomainState state = null;
DomainState state = null;
Domain vm = null;
if (vmName != null) {
try {
@ -2349,7 +2335,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
KVMStoragePool primaryPool = _storagePoolMgr.getStoragePool(cmd.getPool().getType(), cmd.getPool().getUuid());
KVMPhysicalDisk disk = primaryPool.getPhysicalDisk(cmd.getVolumePath());
if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && !primaryPool.isExternalSnapshot()) {
if (state == DomainState.VIR_DOMAIN_RUNNING && !primaryPool.isExternalSnapshot()) {
String vmUuid = vm.getUUIDString();
Object[] args = new Object[] {snapshotName, vmUuid};
String snapshot = SnapshotXML.format(args);
@ -2367,7 +2353,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
*/
vm = getDomain(conn, cmd.getVmName());
state = vm.getInfo().state;
if (state == DomainInfo.DomainState.VIR_DOMAIN_PAUSED) {
if (state == DomainState.VIR_DOMAIN_PAUSED) {
vm.resume();
}
} else {
@ -2525,7 +2511,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
/* Delete the snapshot on primary */
DomainInfo.DomainState state = null;
DomainState state = null;
Domain vm = null;
if (vmName != null) {
try {
@ -2537,7 +2523,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
KVMStoragePool primaryStorage = _storagePoolMgr.getStoragePool(cmd.getPool().getType(), cmd.getPool().getUuid());
if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && !primaryStorage.isExternalSnapshot()) {
if (state == DomainState.VIR_DOMAIN_RUNNING && !primaryStorage.isExternalSnapshot()) {
String vmUuid = vm.getUUIDString();
Object[] args = new Object[] {snapshotName, vmUuid};
String snapshot = SnapshotXML.format(args);
@ -2551,7 +2537,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
*/
vm = getDomain(conn, cmd.getVmName());
state = vm.getInfo().state;
if (state == DomainInfo.DomainState.VIR_DOMAIN_PAUSED) {
if (state == DomainState.VIR_DOMAIN_PAUSED) {
vm.resume();
}
} else {
@ -2965,23 +2951,18 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return new ReadyAnswer(cmd);
}
protected State convertToState(DomainInfo.DomainState ps) {
final State state = s_statesTable.get(ps);
return state == null ? State.Unknown : state;
}
protected PowerState convertToPowerState(DomainInfo.DomainState ps) {
protected PowerState convertToPowerState(DomainState ps) {
final PowerState state = s_powerStatesTable.get(ps);
return state == null ? PowerState.PowerUnknown : state;
}
protected State getVmState(Connect conn, final String vmName) {
protected PowerState getVmState(Connect conn, final String vmName) {
int retry = 3;
Domain vms = null;
while (retry-- > 0) {
try {
vms = conn.domainLookupByName(vmName);
State s = convertToState(vms.getInfo().state);
PowerState s = convertToPowerState(vms.getInfo().state);
return s;
} catch (final LibvirtException e) {
s_logger.warn("Can't get vm state " + vmName + e.getMessage() + "retry:" + retry);
@ -2995,20 +2976,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
}
return State.Stopped;
return PowerState.PowerOff;
}
private Answer execute(CheckVirtualMachineCommand cmd) {
try {
Connect conn = LibvirtConnection.getConnectionByVmName(cmd.getVmName());
final State state = getVmState(conn, cmd.getVmName());
final PowerState state = getVmState(conn, cmd.getVmName());
Integer vncPort = null;
if (state == State.Running) {
if (state == PowerState.PowerOn) {
vncPort = getVncPort(conn, cmd.getVmName());
synchronized (_vms) {
_vms.put(cmd.getVmName(), State.Running);
}
}
return new CheckVirtualMachineAnswer(cmd, state, vncPort);
@ -3055,12 +3032,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
private Answer execute(MigrateCommand cmd) {
String vmName = cmd.getVmName();
State state = null;
String result = null;
synchronized (_vms) {
state = _vms.get(vmName);
_vms.put(vmName, State.Stopping);
}
List<InterfaceDef> ifaces = null;
List<DiskDef> disks = null;
@ -3119,7 +3091,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
// pause vm if we meet the vm.migrate.pauseafter threshold and not already paused
if (_migratePauseAfter > 0 && sleeptime > _migratePauseAfter && dm.getInfo().state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING ) {
if (_migratePauseAfter > 0 && sleeptime > _migratePauseAfter && dm.getInfo().state == DomainState.VIR_DOMAIN_RUNNING ) {
s_logger.info("Pausing VM " + vmName + " due to property vm.migrate.pauseafter setting to " + _migratePauseAfter+ "ms to complete migration");
try {
dm.suspend();
@ -3170,9 +3142,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
if (result != null) {
synchronized (_vms) {
_vms.put(vmName, state);
}
} else {
destroy_network_rules_for_vm(conn, vmName);
for (InterfaceDef iface : ifaces) {
@ -3243,10 +3212,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return new PrepareForMigrationAnswer(cmd, "failed to connect physical disks to host");
}
synchronized (_vms) {
_vms.put(vm.getName(), State.Migrating);
}
skipDisconnect = true;
return new PrepareForMigrationAnswer(cmd);
@ -3419,10 +3384,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
private Answer execute(RebootCommand cmd) {
synchronized (_vms) {
_vms.put(cmd.getVmName(), State.Starting);
}
try {
Connect conn = LibvirtConnection.getConnectionByVmName(cmd.getVmName());
final String result = rebootVM(conn, cmd.getVmName());
@ -3440,10 +3401,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
} catch (LibvirtException e) {
return new RebootAnswer(cmd, e.getMessage(), false);
} finally {
synchronized (_vms) {
_vms.put(cmd.getVmName(), State.Running);
}
}
}
@ -3504,7 +3461,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
try {
Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
Domain vm = conn.domainLookupByName(cmd.getVmName());
if (vm != null && vm.getInfo().state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING) {
if (vm != null && vm.getInfo().state == DomainState.VIR_DOMAIN_RUNNING) {
return new StopAnswer(cmd, "vm is still running on host", false);
}
} catch (Exception e) {
@ -3512,11 +3469,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
State state = null;
synchronized (_vms) {
state = _vms.get(vmName);
_vms.put(vmName, State.Stopping);
}
try {
Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
@ -3538,18 +3490,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
state = State.Stopped;
return new StopAnswer(cmd, result, true);
} catch (LibvirtException e) {
return new StopAnswer(cmd, e.getMessage(), false);
} finally {
synchronized (_vms) {
if (state != null) {
_vms.put(vmName, state);
} else {
_vms.remove(vmName);
}
}
}
}
@ -3799,13 +3742,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
String vmName = vmSpec.getName();
LibvirtVMDef vm = null;
State state = State.Stopped;
DomainState state = DomainState.VIR_DOMAIN_SHUTOFF;
Connect conn = null;
try {
synchronized (_vms) {
_vms.put(vmName, State.Starting);
}
NicTO[] nics = vmSpec.getNics();
for (NicTO nic : nics) {
@ -3874,7 +3813,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
state = State.Running;
state = DomainState.VIR_DOMAIN_RUNNING;
return new StartAnswer(cmd);
} catch (LibvirtException e) {
s_logger.warn("LibvirtException ", e);
@ -3895,14 +3834,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
return new StartAnswer(cmd, e.getMessage());
} finally {
synchronized (_vms) {
if (state != State.Stopped) {
_vms.put(vmName, state);
} else {
_vms.remove(vmName);
}
}
if (state != State.Running) {
if (state != DomainState.VIR_DOMAIN_RUNNING) {
_storagePoolMgr.disconnectPhysicalDisksViaVmSpec(vmSpec);
}
}
@ -4221,13 +4153,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
@Override
public PingCommand getCurrentStatus(long id) {
final HashMap<String, State> newStates = sync();
if (!_canBridgeFirewall) {
return new PingRoutingCommand(com.cloud.host.Host.Type.Routing, id, newStates, this.getHostVmStateReport());
return new PingRoutingCommand(com.cloud.host.Host.Type.Routing, id, this.getHostVmStateReport());
} else {
HashMap<String, Pair<Long, Long>> nwGrpStates = syncNetworkGroups(id);
return new PingRoutingWithNwGroupsCommand(getType(), id, newStates, this.getHostVmStateReport(), nwGrpStates);
return new PingRoutingWithNwGroupsCommand(getType(), id, this.getHostVmStateReport(), nwGrpStates);
}
}
@ -4249,19 +4180,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
@Override
public StartupCommand[] initialize() {
Map<String, State> changes = null;
synchronized (_vms) {
_vms.clear();
changes = sync();
}
final List<Object> info = getHostInfo();
final StartupRoutingCommand cmd =
new StartupRoutingCommand((Integer)info.get(0), (Long)info.get(1), (Long)info.get(2), (Long)info.get(4), (String)info.get(3), _hypervisorType,
RouterPrivateIpStrategy.HostLocal);
cmd.setStateChanges(changes);
cmd.setCpuSockets((Integer)info.get(5));
fillNetworkInformation(cmd);
_privateIp = cmd.getPrivateIpAddress();
@ -4269,7 +4193,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
cmd.setPool(_pool);
cmd.setCluster(_clusterId);
cmd.setGatewayIpAddress(_localGateway);
cmd.setHostVmStateReport(getHostVmStateReport());
cmd.setIqn(getIqn());
StartupStorageCommand sscmd = null;
@ -4322,138 +4245,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
protected HashMap<String, State> sync() {
HashMap<String, State> newStates;
HashMap<String, State> oldStates = null;
final HashMap<String, State> changes = new HashMap<String, State>();
synchronized (_vms) {
newStates = getAllVms();
if (newStates == null) {
s_logger.debug("Unable to get the vm states so no state sync at this point.");
return changes;
}
oldStates = new HashMap<String, State>(_vms.size());
oldStates.putAll(_vms);
for (final Map.Entry<String, State> entry : newStates.entrySet()) {
final String vm = entry.getKey();
State newState = entry.getValue();
final State oldState = oldStates.remove(vm);
if (newState == State.Stopped && oldState != State.Stopping && oldState != null && oldState != State.Stopped) {
newState = getRealPowerState(vm);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + ": libvirt has state " + newState + " and we have state " + (oldState != null ? oldState.toString() : "null"));
}
if (vm.startsWith("migrating")) {
s_logger.debug("Migration detected. Skipping");
continue;
}
if (oldState == null) {
_vms.put(vm, newState);
s_logger.debug("Detecting a new state but couldn't find a old state so adding it to the changes: " + vm);
changes.put(vm, newState);
} else if (oldState == State.Starting) {
if (newState == State.Running) {
_vms.put(vm, newState);
} else if (newState == State.Stopped) {
s_logger.debug("Ignoring vm " + vm + " because of a lag in starting the vm.");
}
} else if (oldState == State.Migrating) {
if (newState == State.Running) {
s_logger.debug("Detected that an migrating VM is now running: " + vm);
_vms.put(vm, newState);
}
} else if (oldState == State.Stopping) {
if (newState == State.Stopped) {
_vms.put(vm, newState);
} else if (newState == State.Running) {
s_logger.debug("Ignoring vm " + vm + " because of a lag in stopping the vm. ");
}
} else if (oldState != newState) {
_vms.put(vm, newState);
if (newState == State.Stopped) {
if (_vmsKilled.remove(vm)) {
s_logger.debug("VM " + vm + " has been killed for storage. ");
newState = State.Error;
}
}
changes.put(vm, newState);
}
}
for (final Map.Entry<String, State> entry : oldStates.entrySet()) {
final String vm = entry.getKey();
final State oldState = entry.getValue();
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + " is now missing from libvirt so reporting stopped");
}
if (oldState == State.Stopping) {
s_logger.debug("Ignoring VM " + vm + " in transition state stopping.");
_vms.remove(vm);
} else if (oldState == State.Starting) {
s_logger.debug("Ignoring VM " + vm + " in transition state starting.");
} else if (oldState == State.Stopped) {
_vms.remove(vm);
} else if (oldState == State.Migrating) {
s_logger.debug("Ignoring VM " + vm + " in migrating state.");
} else {
_vms.remove(vm);
State state = State.Stopped;
if (_vmsKilled.remove(entry.getKey())) {
s_logger.debug("VM " + vm + " has been killed by storage monitor");
state = State.Error;
}
changes.put(entry.getKey(), state);
}
}
}
return changes;
}
protected State getRealPowerState(String vm) {
int i = 0;
s_logger.trace("Checking on the HALTED State");
Domain dm = null;
for (; i < 5; i++) {
try {
Connect conn = LibvirtConnection.getConnectionByVmName(vm);
dm = conn.domainLookupByName(vm);
DomainInfo.DomainState vps = dm.getInfo().state;
if (vps != null && vps != DomainInfo.DomainState.VIR_DOMAIN_SHUTOFF && vps != DomainInfo.DomainState.VIR_DOMAIN_NOSTATE) {
return convertToState(vps);
}
} catch (final LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (final LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
s_logger.trace("Ignoring InterruptedException.", e);
}
}
return State.Stopped;
}
protected List<String> getAllVmNames(Connect conn) {
ArrayList<String> la = new ArrayList<String>();
try {
@ -4494,104 +4285,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return la;
}
private HashMap<String, State> getAllVms() {
final HashMap<String, State> vmStates = new HashMap<String, State>();
Connect conn = null;
if (_hypervisorType == HypervisorType.LXC) {
try {
conn = LibvirtConnection.getConnectionByType(HypervisorType.LXC.toString());
vmStates.putAll(getAllVms(conn));
conn = LibvirtConnection.getConnectionByType(HypervisorType.KVM.toString());
vmStates.putAll(getAllVms(conn));
} catch (LibvirtException e) {
s_logger.debug("Failed to get connection: " + e.getMessage());
}
}
if (_hypervisorType == HypervisorType.KVM) {
try {
conn = LibvirtConnection.getConnectionByType(HypervisorType.KVM.toString());
vmStates.putAll(getAllVms(conn));
} catch (LibvirtException e) {
s_logger.debug("Failed to get connection: " + e.getMessage());
}
}
return vmStates;
}
private HashMap<String, State> getAllVms(Connect conn) {
final HashMap<String, State> vmStates = new HashMap<String, State>();
String[] vms = null;
int[] ids = null;
try {
ids = conn.listDomains();
} catch (final LibvirtException e) {
s_logger.warn("Unable to listDomains", e);
return null;
}
try {
vms = conn.listDefinedDomains();
} catch (final LibvirtException e) {
s_logger.warn("Unable to listDomains", e);
return null;
}
Domain dm = null;
for (int i = 0; i < ids.length; i++) {
try {
dm = conn.domainLookupByID(ids[i]);
DomainInfo.DomainState ps = dm.getInfo().state;
final State state = convertToState(ps);
s_logger.trace("VM " + dm.getName() + ": powerstate = " + ps + "; vm state=" + state.toString());
String vmName = dm.getName();
vmStates.put(vmName, state);
} catch (final LibvirtException e) {
s_logger.warn("Unable to get vms", e);
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
}
for (int i = 0; i < vms.length; i++) {
try {
dm = conn.domainLookupByName(vms[i]);
DomainInfo.DomainState ps = dm.getInfo().state;
final State state = convertToState(ps);
String vmName = dm.getName();
s_logger.trace("VM " + vmName + ": powerstate = " + ps + "; vm state=" + state.toString());
vmStates.put(vmName, state);
} catch (final LibvirtException e) {
s_logger.warn("Unable to get vms", e);
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
}
return vmStates;
}
private HashMap<String, HostVmStateReportEntry> getHostVmStateReport() {
final HashMap<String, HostVmStateReportEntry> vmStates = new HashMap<String, HostVmStateReportEntry>();
Connect conn = null;
@ -4643,7 +4336,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
try {
dm = conn.domainLookupByID(ids[i]);
DomainInfo.DomainState ps = dm.getInfo().state;
DomainState ps = dm.getInfo().state;
final PowerState state = convertToPowerState(ps);
@ -4674,7 +4367,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
dm = conn.domainLookupByName(vms[i]);
DomainInfo.DomainState ps = dm.getInfo().state;
DomainState ps = dm.getInfo().state;
final PowerState state = convertToPowerState(ps);
String vmName = dm.getName();
s_logger.trace("VM " + vmName + ": powerstate = " + ps + "; vm state=" + state.toString());
@ -4807,7 +4500,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
protected String stopVM(Connect conn, String vmName) {
DomainInfo.DomainState state = null;
DomainState state = null;
Domain dm = null;
s_logger.debug("Try to stop the vm at first");
@ -4844,7 +4537,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return null;
}
if (state != DomainInfo.DomainState.VIR_DOMAIN_SHUTOFF) {
if (state != DomainState.VIR_DOMAIN_SHUTOFF) {
s_logger.debug("Try to destroy the vm");
ret = stopVM(conn, vmName, true);
if (ret != null) {

View File

@ -134,7 +134,6 @@ import com.cloud.utils.ssh.SSHCmdHelper;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.VirtualMachine.State;
public class OvmResourceBase implements ServerResource, HypervisorResource {
private static final Logger s_logger = Logger.getLogger(OvmResourceBase.class);
@ -158,17 +157,6 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
private final Map<String, Pair<Long, Long>> _vmNetworkStats = new ConcurrentHashMap<String, Pair<Long, Long>>();
private static String s_ovsAgentPath = "/opt/ovs-agent-latest";
// TODO vmsync {
static HashMap<String, State> s_stateMaps;
protected HashMap<String, State> _vms = new HashMap<String, State>(50);
static {
s_stateMaps = new HashMap<String, State>();
s_stateMaps.put("RUNNING", State.Running);
s_stateMaps.put("DOWN", State.Stopped);
s_stateMaps.put("ERROR", State.Error);
s_stateMaps.put("SUSPEND", State.Stopped);
}
// TODO vmsync }
static HashMap<String, PowerState> s_powerStateMaps;
static {
@ -315,7 +303,6 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
//TODO: introduce PIF
cmd.setPrivateIpAddress(_ip);
cmd.setStorageIpAddress(_ip);
cmd.setHostVmStateReport(getHostVmStateReport());
String defaultBridge = OvmBridge.getBridgeByIp(_conn, _ip);
if (_publicNetworkName == null) {
@ -390,12 +377,6 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
try {
StartupRoutingCommand cmd = new StartupRoutingCommand();
fillHostInfo(cmd);
Map<String, State> changes = null;
synchronized (_vms) {
_vms.clear();
changes = sync();
}
cmd.setStateChanges(changes);
cmd.setCaps("hvm");
return new StartupCommand[] {cmd};
} catch (Exception e) {
@ -408,8 +389,7 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
public PingCommand getCurrentStatus(long id) {
try {
OvmHost.ping(_conn);
HashMap<String, State> newStates = sync();
return new PingRoutingCommand(getType(), id, newStates, getHostVmStateReport());
return new PingRoutingCommand(getType(), id, getHostVmStateReport());
} catch (XmlRpcException e) {
s_logger.debug("Check agent status failed", e);
return null;
@ -668,13 +648,8 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
public synchronized StartAnswer execute(StartCommand cmd) {
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
String vmName = vmSpec.getName();
State state = State.Stopped;
OvmVm.Details vmDetails = null;
try {
synchronized (_vms) {
_vms.put(vmName, State.Starting);
}
vmDetails = new OvmVm.Details();
applySpecToVm(vmDetails, vmSpec);
createVbds(vmDetails, vmSpec);
@ -691,21 +666,11 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
}
}
state = State.Running;
return new StartAnswer(cmd);
} catch (Exception e) {
s_logger.debug("Start vm " + vmName + " failed", e);
cleanup(vmDetails);
return new StartAnswer(cmd, e.getMessage());
} finally {
synchronized (_vms) {
//FIXME: where to come to Stopped???
if (state != State.Stopped) {
_vms.put(vmName, state);
} else {
_vms.remove(vmName);
}
}
}
}
@ -729,12 +694,6 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
@Override
public StopAnswer execute(StopCommand cmd) {
String vmName = cmd.getVmName();
State state = null;
synchronized (_vms) {
state = _vms.get(vmName);
_vms.put(vmName, State.Stopping);
}
try {
OvmVm.Details vm = null;
try {
@ -747,29 +706,16 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
deleteAllNetworkRulesForVm(vmName);
OvmVm.stop(_conn, vmName);
cleanup(vm);
state = State.Stopped;
return new StopAnswer(cmd, "success", true);
} catch (Exception e) {
s_logger.debug("Stop " + vmName + "failed", e);
return new StopAnswer(cmd, e.getMessage(), false);
} finally {
synchronized (_vms) {
if (state != null) {
_vms.put(vmName, state);
} else {
_vms.remove(vmName);
}
}
}
}
@Override
public RebootAnswer execute(RebootCommand cmd) {
String vmName = cmd.getVmName();
synchronized (_vms) {
_vms.put(vmName, State.Starting);
}
try {
Map<String, String> res = OvmVm.reboot(_conn, vmName);
@ -778,22 +724,9 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
} catch (Exception e) {
s_logger.debug("Reboot " + vmName + " failed", e);
return new RebootAnswer(cmd, e.getMessage(), false);
} finally {
synchronized (_vms) {
_vms.put(cmd.getVmName(), State.Running);
}
}
}
private State toState(String vmName, String s) {
State state = s_stateMaps.get(s);
if (state == null) {
s_logger.debug("Unkown state " + s + " for " + vmName);
state = State.Unknown;
}
return state;
}
private PowerState toPowerState(String vmName, String s) {
PowerState state = s_powerStateMaps.get(s);
if (state == null) {
@ -813,111 +746,16 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
return vmStates;
}
protected HashMap<String, State> getAllVms() throws XmlRpcException {
final HashMap<String, State> vmStates = new HashMap<String, State>();
protected HashMap<String, PowerState> getAllVms() throws XmlRpcException {
final HashMap<String, PowerState> vmStates = new HashMap<String, PowerState>();
Map<String, String> vms = OvmHost.getAllVms(_conn);
for (final Map.Entry<String, String> entry : vms.entrySet()) {
State state = toState(entry.getKey(), entry.getValue());
vmStates.put(entry.getKey(), state);
PowerState powerState = toPowerState(entry.getKey(), entry.getValue());
vmStates.put(entry.getKey(), powerState);
}
return vmStates;
}
protected HashMap<String, State> sync() {
HashMap<String, State> newStates;
HashMap<String, State> oldStates = null;
try {
final HashMap<String, State> changes = new HashMap<String, State>();
newStates = getAllVms();
if (newStates == null) {
s_logger.debug("Unable to get the vm states so no state sync at this point.");
return null;
}
synchronized (_vms) {
oldStates = new HashMap<String, State>(_vms.size());
oldStates.putAll(_vms);
for (final Map.Entry<String, State> entry : newStates.entrySet()) {
final String vm = entry.getKey();
State newState = entry.getValue();
final State oldState = oldStates.remove(vm);
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + ": ovm has state " + newState + " and we have state " + (oldState != null ? oldState.toString() : "null"));
}
/*
* TODO: what is migrating ??? if
* (vm.startsWith("migrating")) {
* s_logger.debug("Migrating from xen detected. Skipping");
* continue; }
*/
if (oldState == null) {
_vms.put(vm, newState);
s_logger.debug("Detecting a new state but couldn't find a old state so adding it to the changes: " + vm);
changes.put(vm, newState);
} else if (oldState == State.Starting) {
if (newState == State.Running) {
_vms.put(vm, newState);
} else if (newState == State.Stopped) {
s_logger.debug("Ignoring vm " + vm + " because of a lag in starting the vm.");
}
} else if (oldState == State.Migrating) {
if (newState == State.Running) {
s_logger.debug("Detected that an migrating VM is now running: " + vm);
_vms.put(vm, newState);
}
} else if (oldState == State.Stopping) {
if (newState == State.Stopped) {
_vms.put(vm, newState);
} else if (newState == State.Running) {
s_logger.debug("Ignoring vm " + vm + " because of a lag in stopping the vm. ");
}
} else if (oldState != newState) {
_vms.put(vm, newState);
if (newState == State.Stopped) {
//TODO: need anything here?
}
changes.put(vm, newState);
}
}
for (final Map.Entry<String, State> entry : oldStates.entrySet()) {
final String vm = entry.getKey();
final State oldState = entry.getValue();
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + " is now missing from ovm server so reporting stopped");
}
if (oldState == State.Stopping) {
s_logger.debug("Ignoring VM " + vm + " in transition state stopping.");
_vms.remove(vm);
} else if (oldState == State.Starting) {
s_logger.debug("Ignoring VM " + vm + " in transition state starting.");
} else if (oldState == State.Stopped) {
_vms.remove(vm);
} else if (oldState == State.Migrating) {
s_logger.debug("Ignoring VM " + vm + " in migrating state.");
} else {
_vms.remove(vm);
State state = State.Stopped;
// TODO: need anything here???
changes.put(entry.getKey(), state);
}
}
}
return changes;
} catch (Exception e) {
s_logger.debug("Ovm full sync failed", e);
return null;
}
}
protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) {
try {
@ -998,9 +836,6 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
getNetwork(nic);
}
synchronized (_vms) {
_vms.put(vm.getName(), State.Migrating);
}
return new PrepareForMigrationAnswer(cmd);
} catch (Exception e) {
s_logger.warn("Catch Exception " + e.getClass().getName() + " prepare for migration failed due to " + e.toString(), e);
@ -1010,28 +845,16 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
protected MigrateAnswer execute(final MigrateCommand cmd) {
final String vmName = cmd.getVmName();
State state = null;
synchronized (_vms) {
state = _vms.get(vmName);
_vms.put(vmName, State.Stopping);
}
try {
OvmVm.Details vm = OvmVm.getDetails(_conn, vmName);
String destIp = cmd.getDestinationIp();
OvmVm.migrate(_conn, vmName, destIp);
state = State.Stopping;
cleanup(vm);
return new MigrateAnswer(cmd, true, "migration succeeded", null);
} catch (Exception e) {
String msg = "Catch Exception " + e.getClass().getName() + ": Migration failed due to " + e.toString();
s_logger.debug(msg, e);
return new MigrateAnswer(cmd, false, msg, null);
} finally {
synchronized (_vms) {
_vms.put(vmName, state);
}
}
}
@ -1040,23 +863,17 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
try {
Map<String, String> res = OvmVm.register(_conn, vmName);
Integer vncPort = Integer.parseInt(res.get("vncPort"));
HashMap<String, State> states = getAllVms();
State vmState = states.get(vmName);
if (vmState == null) {
HashMap<String, PowerState> states = getAllVms();
PowerState vmPowerState = states.get(vmName);
if (vmPowerState == null) {
s_logger.warn("Check state of " + vmName + " return null in CheckVirtualMachineCommand");
vmState = State.Stopped;
vmPowerState = PowerState.PowerOff;
}
if (vmState == State.Running) {
synchronized (_vms) {
_vms.put(vmName, State.Running);
}
}
return new CheckVirtualMachineAnswer(cmd, vmState, vncPort);
return new CheckVirtualMachineAnswer(cmd, vmPowerState, vncPort);
} catch (Exception e) {
s_logger.debug("Check migration for " + vmName + " failed", e);
return new CheckVirtualMachineAnswer(cmd, State.Stopped, null);
return new CheckVirtualMachineAnswer(cmd, PowerState.PowerOff, null);
}
}

View File

@ -67,7 +67,6 @@ public class AgentRoutingResource extends AgentStorageResource {
private static final Logger s_logger = Logger.getLogger(AgentRoutingResource.class);
private static final Gson s_gson = GsonHelper.getGson();
protected Map<String, State> _vms = new HashMap<String, State>();
private Map<String, Pair<Long, Long>> _runningVms = new HashMap<String, Pair<Long, Long>>();
long usedCpu = 0;
long usedMem = 0;
@ -159,23 +158,12 @@ public class AgentRoutingResource extends AgentStorageResource {
if (isStopped()) {
return null;
}
synchronized (_vms) {
if (_vms.size() == 0) {
//load vms state from database
_vms.putAll(_simMgr.getVmStates(hostGuid));
}
}
final HashMap<String, State> newStates = sync();
HashMap<String, Pair<Long, Long>> nwGrpStates = _simMgr.syncNetworkGroups(hostGuid);
return new PingRoutingWithNwGroupsCommand(getType(), id, newStates, getHostVmStateReport(), nwGrpStates);
return new PingRoutingWithNwGroupsCommand(getType(), id, getHostVmStateReport(), nwGrpStates);
}
@Override
public StartupCommand[] initialize() {
synchronized (_vms) {
_vms.clear();
}
Map<String, State> changes = _simMgr.getVmStates(this.hostGuid);
Map<String, MockVMVO> vmsMaps = _simMgr.getVms(this.hostGuid);
totalCpu = agentHost.getCpuCount() * agentHost.getCpuSpeed();
totalMem = agentHost.getMemorySize();
@ -191,7 +179,6 @@ public class AgentRoutingResource extends AgentStorageResource {
StartupRoutingCommand cmd =
new StartupRoutingCommand((Integer)info.get(0), (Long)info.get(1), (Long)info.get(2), (Long)info.get(4), (String)info.get(3), HypervisorType.Simulator,
RouterPrivateIpStrategy.HostLocal);
cmd.setStateChanges(changes);
Map<String, String> hostDetails = new HashMap<String, String>();
hostDetails.put(RouterPrivateIpStrategy.class.getCanonicalName(), RouterPrivateIpStrategy.DcGlobal.toString());
@ -240,11 +227,6 @@ public class AgentRoutingResource extends AgentStorageResource {
if (this.totalCpu < (vmSpec.getCpus() * vmSpec.getMaxSpeed() + this.usedCpu) || this.totalMem < (vmSpec.getMaxRam() + this.usedMem)) {
return new StartAnswer(cmd, "Not enough resource to start the vm");
}
State state = State.Stopped;
synchronized (_vms) {
_vms.put(vmName, State.Starting);
}
try {
Answer result = _simMgr.simulate(cmd, hostGuid);
if (!result.getResult()) {
@ -254,12 +236,7 @@ public class AgentRoutingResource extends AgentStorageResource {
this.usedCpu += vmSpec.getCpus() * vmSpec.getMaxSpeed();
this.usedMem += vmSpec.getMaxRam();
_runningVms.put(vmName, new Pair<Long, Long>(Long.valueOf(vmSpec.getCpus() * vmSpec.getMaxSpeed()), vmSpec.getMaxRam()));
state = State.Running;
} finally {
synchronized (_vms) {
_vms.put(vmName, state);
}
}
return new StartAnswer(cmd);
@ -271,11 +248,6 @@ public class AgentRoutingResource extends AgentStorageResource {
StopAnswer answer = null;
String vmName = cmd.getVmName();
State state = null;
synchronized (_vms) {
state = _vms.get(vmName);
_vms.put(vmName, State.Stopping);
}
try {
Answer result = _simMgr.simulate(cmd, hostGuid);
@ -289,12 +261,7 @@ public class AgentRoutingResource extends AgentStorageResource {
this.usedCpu -= data.first();
this.usedMem -= data.second();
}
state = State.Stopped;
} finally {
synchronized (_vms) {
_vms.put(vmName, state);
}
}
return answer;
@ -303,12 +270,6 @@ public class AgentRoutingResource extends AgentStorageResource {
protected CheckVirtualMachineAnswer execute(final CheckVirtualMachineCommand cmd) {
final String vmName = cmd.getVmName();
CheckVirtualMachineAnswer result = (CheckVirtualMachineAnswer)_simMgr.simulate(cmd, hostGuid);
State state = result.getState();
if (state == State.Running) {
synchronized (_vms) {
_vms.put(vmName, State.Running);
}
}
return result;
}
@ -346,73 +307,6 @@ public class AgentRoutingResource extends AgentStorageResource {
return report;
}
protected HashMap<String, State> sync() {
Map<String, State> newStates;
Map<String, State> oldStates = null;
HashMap<String, State> changes = new HashMap<String, State>();
synchronized (_vms) {
oldStates = new HashMap<String, State>(_vms.size());
oldStates.putAll(_vms);
newStates = new HashMap<String, State>(_vms.size());
newStates.putAll(_vms);
for (Map.Entry<String, State> entry : newStates.entrySet()) {
String vm = entry.getKey();
State newState = entry.getValue();
State oldState = oldStates.remove(vm);
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + ": has state " + newState + " and we have state " + (oldState != null ? oldState.toString() : "null"));
}
if (oldState == null) {
_vms.put(vm, newState);
changes.put(vm, newState);
} else if (oldState == State.Starting) {
if (newState == State.Running) {
_vms.put(vm, newState);
} else if (newState == State.Stopped) {
s_logger.debug("Ignoring vm " + vm + " because of a lag in starting the vm.");
}
} else if (oldState == State.Stopping) {
if (newState == State.Stopped) {
_vms.put(vm, newState);
} else if (newState == State.Running) {
s_logger.debug("Ignoring vm " + vm + " because of a lag in stopping the vm. ");
}
} else if (oldState != newState) {
_vms.put(vm, newState);
changes.put(vm, newState);
}
}
for (Map.Entry<String, State> entry : oldStates.entrySet()) {
String vm = entry.getKey();
State oldState = entry.getValue();
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + " is now missing from simulator agent so reporting stopped");
}
if (oldState == State.Stopping) {
s_logger.debug("Ignoring VM " + vm + " in transition state stopping.");
_vms.remove(vm);
} else if (oldState == State.Starting) {
s_logger.debug("Ignoring VM " + vm + " in transition state starting.");
} else if (oldState == State.Stopped) {
_vms.remove(vm);
} else {
changes.put(entry.getKey(), State.Stopped);
}
}
}
return changes;
}
private Answer execute(ShutdownCommand cmd) {
this.stopped = true;
return new Answer(cmd);

View File

@ -269,7 +269,6 @@ import com.cloud.utils.net.NetUtils;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachineName;
import com.cloud.vm.VmDetailConstants;
@ -333,19 +332,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
s_powerStatesTable.put(VirtualMachinePowerState.SUSPENDED, PowerState.PowerOn);
}
// TODO vmsync {
// deprecated, will delete after full replacement
//
protected static HashMap<VirtualMachinePowerState, State> s_statesTable;
static {
s_statesTable = new HashMap<VirtualMachinePowerState, State>();
s_statesTable.put(VirtualMachinePowerState.POWERED_ON, State.Running);
s_statesTable.put(VirtualMachinePowerState.POWERED_OFF, State.Stopped);
s_statesTable.put(VirtualMachinePowerState.SUSPENDED, State.Stopped);
}
// TODO vmsync }
public Gson getGson() {
return _gson;
}
@ -1314,11 +1300,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
try {
VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
// mark VM as starting state so that sync can know not to report stopped too early
synchronized (_vms) {
_vms.put(vmInternalCSName, State.Starting);
}
VmwareHypervisorHost hyperHost = getHyperHost(context);
DiskTO[] disks = validateDisks(vmSpec.getDisks());
assert (disks.length > 0);
@ -1727,13 +1708,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
s_logger.warn(msg, e);
return new StartAnswer(cmd, msg);
} finally {
synchronized (_vms) {
if (state != State.Stopped) {
_vms.put(vmInternalCSName, state);
} else {
_vms.remove(vmInternalCSName);
}
}
}
}
@ -2570,7 +2544,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
HashMap<String, VmStatsEntry> vmStatsMap = null;
try {
HashMap<String, State> newStates = getVmStates();
HashMap<String, PowerState> vmPowerStates = getVmStates();
// getVmNames should return all i-x-y values.
List<String> requestedVmNames = cmd.getVmNames();
@ -2578,7 +2552,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if (requestedVmNames != null) {
for (String vmName : requestedVmNames) {
if (newStates.get(vmName) != null) {
if (vmPowerStates.get(vmName) != null) {
vmNames.add(vmName);
}
}
@ -2653,40 +2627,25 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
}
State state = null;
synchronized (_vms) {
state = _vms.get(cmd.getVmName());
_vms.put(cmd.getVmName(), State.Stopping);
}
try {
vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, "0");
if (getVmPowerState(vmMo) != PowerState.PowerOff) {
if (vmMo.safePowerOff(_shutdownWaitMs)) {
state = State.Stopped;
return new StopAnswer(cmd, "Stop VM " + cmd.getVmName() + " Succeed", true);
} else {
String msg = "Have problem in powering off VM " + cmd.getVmName() + ", let the process continue";
s_logger.warn(msg);
return new StopAnswer(cmd, msg, true);
}
} else {
state = State.Stopped;
}
String msg = "VM " + cmd.getVmName() + " is already in stopped state";
s_logger.info(msg);
return new StopAnswer(cmd, msg, true);
} finally {
synchronized (_vms) {
_vms.put(cmd.getVmName(), state);
}
}
} else {
synchronized (_vms) {
_vms.remove(cmd.getVmName());
}
String msg = "VM " + cmd.getVmName() + " is no longer in vSphere";
s_logger.info(msg);
@ -2773,7 +2732,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
final String vmName = cmd.getVmName();
State state = State.Unknown;
PowerState powerState = PowerState.PowerUnknown;
Integer vncPort = null;
VmwareContext context = getServiceContext();
@ -2782,16 +2741,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
try {
VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName);
if (vmMo != null) {
state = getVmState(vmMo);
if (state == State.Running) {
synchronized (_vms) {
_vms.put(vmName, State.Running);
}
}
return new CheckVirtualMachineAnswer(cmd, state, vncPort);
powerState = getVmPowerState(vmMo);
return new CheckVirtualMachineAnswer(cmd, powerState, vncPort);
} else {
s_logger.warn("Can not find vm " + vmName + " to execute CheckVirtualMachineCommand");
return new CheckVirtualMachineAnswer(cmd, state, vncPort);
return new CheckVirtualMachineAnswer(cmd, powerState, vncPort);
}
} catch (Throwable e) {
@ -2801,7 +2755,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
s_logger.error("Unexpected exception: " + VmwareHelper.getExceptionMessage(e), e);
return new CheckVirtualMachineAnswer(cmd, state, vncPort);
return new CheckVirtualMachineAnswer(cmd, powerState, vncPort);
}
}
@ -2875,13 +2829,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
final String vmName = cmd.getVmName();
State state = null;
synchronized (_vms) {
state = _vms.get(vmName);
_vms.put(vmName, State.Stopping);
}
try {
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter();
@ -2905,7 +2852,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
throw new Exception("Migration failed");
}
state = State.Stopping;
return new MigrateAnswer(cmd, true, "migration succeeded", null);
} catch (Throwable e) {
if (e instanceof RemoteException) {
@ -2916,10 +2862,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
String msg = "MigrationCommand failed due to " + VmwareHelper.getExceptionMessage(e);
s_logger.warn(msg, e);
return new MigrateAnswer(cmd, false, msg, null);
} finally {
synchronized (_vms) {
_vms.put(vmName, state);
}
}
}
@ -2932,12 +2874,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
VirtualMachineTO vmTo = cmd.getVirtualMachine();
final String vmName = vmTo.getName();
State state = null;
synchronized (_vms) {
state = _vms.get(vmName);
_vms.put(vmName, State.Stopping);
}
VmwareHypervisorHost srcHyperHost = null;
VmwareHypervisorHost tgtHyperHost = null;
VirtualMachineMO vmMo = null;
@ -3082,7 +3018,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
s_logger.debug("Successfully relocated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName());
}
state = State.Stopping;
return new MigrateWithStorageAnswer(cmd, volumeToList);
} catch (Throwable e) {
if (e instanceof RemoteException) {
@ -3105,9 +3040,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + _hostName);
}
synchronized (_vms) {
_vms.put(vmName, state);
}
}
}
@ -3922,12 +3854,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
@Override
public PingCommand getCurrentStatus(long id) {
gcAndKillHungWorkerVMs();
HashMap<String, State> newStates = sync();
if (newStates == null) {
return null;
}
VmwareContext context = getServiceContext();
VmwareHypervisorHost hyperHost = getHyperHost(context);
try {
@ -3938,7 +3864,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
s_logger.error("Unexpected exception", e);
return null;
}
return new PingRoutingCommand(getType(), id, newStates, syncHostVmStates());
return new PingRoutingCommand(getType(), id, syncHostVmStates());
}
private void gcAndKillHungWorkerVMs() {
@ -4043,22 +3969,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
StartupRoutingCommand cmd = new StartupRoutingCommand();
fillHostInfo(cmd);
Map<String, State> changes = null;
synchronized (_vms) {
_vms.clear();
changes = sync();
}
cmd.setHypervisorType(HypervisorType.VMware);
// TODO vmsync {
// deprecated after full replacement
cmd.setStateChanges(changes);
// TODO vmsync}
cmd.setHostVmStateReport(syncHostVmStates());
cmd.setCluster(_cluster);
cmd.setHypervisorVersion(hostApiVersion);
@ -4246,114 +4157,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
}
protected HashMap<String, State> sync() {
HashMap<String, State> changes = new HashMap<String, State>();
HashMap<String, State> oldStates = null;
try {
synchronized (_vms) {
HashMap<String, State> newStates = getVmStates();
oldStates = new HashMap<String, State>(_vms.size());
oldStates.putAll(_vms);
for (final Map.Entry<String, State> entry : newStates.entrySet()) {
final String vm = entry.getKey();
State newState = entry.getValue();
final State oldState = oldStates.remove(vm);
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + ": vSphere has state " + newState + " and we have state " + (oldState != null ? oldState.toString() : "null"));
}
if (vm.startsWith("migrating")) {
s_logger.debug("Migrating detected. Skipping");
continue;
}
if (oldState == null) {
_vms.put(vm, newState);
s_logger.debug("Detecting a new state but couldn't find a old state so adding it to the changes: " + vm);
changes.put(vm, newState);
} else if (oldState == State.Starting) {
if (newState == State.Running) {
_vms.put(vm, newState);
} else if (newState == State.Stopped) {
s_logger.debug("Ignoring vm " + vm + " because of a lag in starting the vm.");
}
} else if (oldState == State.Migrating) {
if (newState == State.Running) {
s_logger.debug("Detected that an migrating VM is now running: " + vm);
_vms.put(vm, newState);
}
} else if (oldState == State.Stopping) {
if (newState == State.Stopped) {
_vms.put(vm, newState);
} else if (newState == State.Running) {
s_logger.debug("Ignoring vm " + vm + " because of a lag in stopping the vm. ");
}
} else if (oldState != newState) {
_vms.put(vm, newState);
if (newState == State.Stopped) {
/*
* if (_vmsKilled.remove(vm)) { s_logger.debug("VM " + vm + " has been killed for storage. ");
* newState = State.Error; }
*/
}
changes.put(vm, newState);
}
}
for (final Map.Entry<String, State> entry : oldStates.entrySet()) {
final String vm = entry.getKey();
final State oldState = entry.getValue();
if (isVmInCluster(vm)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM " + vm + " is now missing from host report but we detected that it might be migrated to other host by vCenter");
}
if (oldState != State.Starting && oldState != State.Migrating) {
s_logger.debug("VM " + vm +
" is now missing from host report and VM is not at starting/migrating state, remove it from host VM-sync map, oldState: " + oldState);
_vms.remove(vm);
} else {
s_logger.debug("VM " + vm + " is missing from host report, but we will ignore VM " + vm + " in transition state " + oldState);
}
continue;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM " + vm + " is now missing from host report");
}
if (oldState == State.Stopping) {
s_logger.debug("Ignoring VM " + vm + " in transition state stopping.");
_vms.remove(vm);
} else if (oldState == State.Starting) {
s_logger.debug("Ignoring VM " + vm + " in transition state starting.");
} else if (oldState == State.Stopped) {
_vms.remove(vm);
} else if (oldState == State.Migrating) {
s_logger.debug("Ignoring VM " + vm + " in migrating state.");
} else {
State state = State.Stopped;
changes.put(entry.getKey(), state);
}
}
}
} catch (Throwable e) {
if (e instanceof RemoteException) {
s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context");
invalidateServiceContext();
}
s_logger.error("Unable to perform sync information collection process at this point due to " + VmwareHelper.getExceptionMessage(e), e);
return null;
}
return changes;
}
private boolean isVmInCluster(String vmName) throws Exception {
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
@ -4515,8 +4318,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return newStates;
}
// TODO vmsync {
private HashMap<String, State> getVmStates() throws Exception {
private HashMap<String, PowerState> getVmStates() throws Exception {
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
int key = ((HostMO)hyperHost).getCustomFieldKey("VirtualMachine", CustomFieldConstants.CLOUD_VM_INTERNAL_NAME);
@ -4560,7 +4363,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
name = VMInternalCSName;
if (!isTemplate) {
newStates.put(name, convertState(powerState));
newStates.put(name, convertPowerState(powerState));
}
}
}
@ -4568,6 +4371,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return newStates;
}
private HashMap<String, VmStatsEntry> getVmStats(List<String> vmNames) throws Exception {
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
HashMap<String, VmStatsEntry> vmResponseMap = new HashMap<String, VmStatsEntry>();
@ -4690,7 +4495,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return vmResponseMap;
}
// TODO vmsync }
protected String networkUsage(final String privateIpAddress, final String option, final String ethName) {
String args = null;
@ -4788,19 +4592,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return connect(vmname, ipAddress, 3922);
}
// TODO vmsync {
// deprecated after full replacement
private static State convertState(VirtualMachinePowerState powerState) {
return s_statesTable.get(powerState);
}
public static State getVmState(VirtualMachineMO vmMo) throws Exception {
VirtualMachineRuntimeInfo runtimeInfo = vmMo.getRuntimeInfo();
return convertState(runtimeInfo.getPowerState());
}
// TODO vmsync }
private static PowerState convertPowerState(VirtualMachinePowerState powerState) {
return s_powerStatesTable.get(powerState);
}
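With the deprecated convertState() path removed, all translation funnels through convertPowerState(). The backing s_powerStatesTable is outside this hunk; a plausible shape, mirroring the XenServer table further down (the exact vSphere entries here are an assumption, not taken from the commit):
private static final Map<VirtualMachinePowerState, PowerState> s_powerStatesTable =
        new HashMap<VirtualMachinePowerState, PowerState>();
static {
    // vSphere distinguishes only on/off/suspended; SUSPENDED is assumed to
    // collapse to PowerOff, as the XenServer table below does
    s_powerStatesTable.put(VirtualMachinePowerState.POWERED_ON, PowerState.PowerOn);
    s_powerStatesTable.put(VirtualMachinePowerState.POWERED_OFF, PowerState.PowerOff);
    s_powerStatesTable.put(VirtualMachinePowerState.SUSPENDED, PowerState.PowerOff);
}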

View File

@ -244,7 +244,6 @@ import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.snapshot.VMSnapshot;
/**
@ -271,7 +270,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
protected long _dcId;
protected String _pod;
protected String _cluster;
protected static final XenServerPoolVms s_vms = new XenServerPoolVms();
protected String _privateNetworkName;
protected String _linkLocalPrivateNetworkName;
protected String _publicNetworkName;
@ -328,29 +326,16 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
}
protected static final HashMap<Types.VmPowerState, PowerState> s_powerStatesTable;
protected static final HashMap<VmPowerState, PowerState> s_powerStatesTable;
static {
s_powerStatesTable = new HashMap<Types.VmPowerState, PowerState>();
s_powerStatesTable.put(Types.VmPowerState.HALTED, PowerState.PowerOff);
s_powerStatesTable.put(Types.VmPowerState.PAUSED, PowerState.PowerOff);
s_powerStatesTable.put(Types.VmPowerState.RUNNING, PowerState.PowerOn);
s_powerStatesTable.put(Types.VmPowerState.SUSPENDED, PowerState.PowerOff);
s_powerStatesTable.put(Types.VmPowerState.UNRECOGNIZED, PowerState.PowerUnknown);
s_powerStatesTable = new HashMap<VmPowerState, PowerState>();
s_powerStatesTable.put(VmPowerState.HALTED, PowerState.PowerOff);
s_powerStatesTable.put(VmPowerState.PAUSED, PowerState.PowerOff);
s_powerStatesTable.put(VmPowerState.RUNNING, PowerState.PowerOn);
s_powerStatesTable.put(VmPowerState.SUSPENDED, PowerState.PowerOff);
s_powerStatesTable.put(VmPowerState.UNRECOGNIZED, PowerState.PowerUnknown);
}
// TODO vmsync {
protected static final HashMap<Types.VmPowerState, State> s_statesTable;
static {
s_statesTable = new HashMap<Types.VmPowerState, State>();
s_statesTable.put(Types.VmPowerState.HALTED, State.Stopped);
s_statesTable.put(Types.VmPowerState.PAUSED, State.Running);
s_statesTable.put(Types.VmPowerState.RUNNING, State.Running);
s_statesTable.put(Types.VmPowerState.SUSPENDED, State.Running);
s_statesTable.put(Types.VmPowerState.UNRECOGNIZED, State.Unknown);
}
// TODO vmsync }
public XsHost getHost() {
return _host;
}
@ -790,13 +775,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
VMSnapshot.Type vmSnapshotType = cmd.getTarget().getType();
Boolean snapshotMemory = vmSnapshotType == VMSnapshot.Type.DiskAndMemory;
Connection conn = getConnection();
VirtualMachine.State vmState = null;
PowerState vmState = null;
VM vm = null;
try {
// remove vm from s_vms, for delta sync
s_vms.remove(_cluster, _name, vmName);
Set<VM> vmSnapshots = VM.getByNameLabel(conn, cmd.getTarget().getSnapshotName());
if (vmSnapshots.size() == 0)
return new RevertToVMSnapshotAnswer(cmd, false, "Cannot find vmSnapshot with name: " + cmd.getTarget().getSnapshotName());
@ -830,10 +812,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
if (!snapshotMemory) {
vm.destroy(conn);
vmState = VirtualMachine.State.Stopped;
vmState = PowerState.PowerOff;
} else {
s_vms.put(_cluster, _name, vmName, State.Running);
vmState = VirtualMachine.State.Running;
vmState = PowerState.PowerOn;
}
// after revert, VM's volumes path have been changed, need to report to manager
@ -1710,7 +1691,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
Connection conn = getConnection();
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
String vmName = vmSpec.getName();
State state = State.Stopped;
VmPowerState state = VmPowerState.HALTED;
VM vm = null;
// if a VDI is created, record its UUID to send back to the CS MS
Map<String, String> iqnToPath = new HashMap<String, String>();
@ -1733,9 +1714,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
}
}
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, State.Starting);
}
s_logger.debug("1. The VM " + vmName + " is in Starting state.");
Host host = Host.getByUuid(conn, _host.uuid);
@ -1839,7 +1817,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
}
state = State.Running;
state = VmPowerState.RUNNING;
StartAnswer startAnswer = new StartAnswer(cmd);
@ -1856,14 +1834,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
return startAnswer;
} finally {
synchronized (_cluster.intern()) {
if (state != State.Stopped) {
s_vms.put(_cluster, _name, vmName, state);
s_logger.debug("2. The VM " + vmName + " is in " + state + " state.");
} else {
s_vms.remove(_cluster, _name, vmName);
s_logger.debug("The VM is in stopped state, detected problem during startup : " + vmName);
}
if (state != VmPowerState.HALTED) {
s_logger.debug("2. The VM " + vmName + " is in " + state + " state.");
} else {
s_logger.debug("The VM is in stopped state, detected problem during startup : " + vmName);
}
}
}
@ -2575,13 +2549,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
protected State convertToState(Types.VmPowerState ps) {
final State state = s_statesTable.get(ps);
return state == null ? State.Unknown : state;
}
private static PowerState convertPowerState(Types.VmPowerState powerState) {
return s_powerStatesTable.get(powerState);
private static PowerState convertToPowerState(VmPowerState ps) {
final PowerState powerState = s_powerStatesTable.get(ps);
return powerState == null ? PowerState.PowerUnknown : powerState;
}
protected HashMap<String, HostVmStateReportEntry> getHostVmStateReport(Connection conn) {
@ -2629,7 +2599,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
if (host_uuid.equalsIgnoreCase(_host.uuid)) {
vmStates.put(
record.nameLabel,
new HostVmStateReportEntry(convertPowerState(ps), host_uuid)
new HostVmStateReportEntry(convertToPowerState(ps), host_uuid)
);
}
}
@ -2638,65 +2608,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
return vmStates;
}
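The report built above replaces the cluster-wide bookkeeping: each ping now carries a per-host map keyed by VM name label. A minimal consumer-side sketch, assuming HostVmStateReportEntry exposes a getState() accessor for the PowerState it was constructed with (the accessor name is an assumption):
void logHostReport(Map<String, HostVmStateReportEntry> report) {
    for (Map.Entry<String, HostVmStateReportEntry> e : report.entrySet()) {
        // key: VM name label; value: the power state observed on this host
        s_logger.debug("VM " + e.getKey() + " reports power state " + e.getValue().getState());
    }
}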
// TODO vmsync {
protected HashMap<String, Pair<String, State>> getAllVms(Connection conn) {
final HashMap<String, Pair<String, State>> vmStates = new HashMap<String, Pair<String, State>>();
Map<VM, VM.Record> vm_map = null;
for (int i = 0; i < 2; i++) {
try {
vm_map = VM.getAllRecords(conn); //USE THIS TO GET ALL VMS FROM A CLUSTER
break;
} catch (final Throwable e) {
s_logger.warn("Unable to get vms", e);
}
try {
Thread.sleep(1000);
} catch (final InterruptedException ex) {
}
}
if (vm_map == null) {
return null;
}
for (VM.Record record : vm_map.values()) {
if (record.isControlDomain || record.isASnapshot || record.isATemplate) {
continue; // Skip DOM0
}
VmPowerState ps = record.powerState;
final State state = convertToState(ps);
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + record.nameLabel + ": powerstate = " + ps + "; vm state=" + state.toString());
}
Host host = record.residentOn;
String host_uuid = null;
if (!isRefNull(host)) {
try {
host_uuid = host.getUuid(conn);
} catch (BadServerResponse e) {
s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
} catch (XenAPIException e) {
s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
} catch (XmlRpcException e) {
s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
}
vmStates.put(record.nameLabel, new Pair<String, State>(host_uuid, state));
}
}
return vmStates;
}
// TODO vmsync }
protected State getVmState(Connection conn, final String vmName) {
protected PowerState getVmState(Connection conn, final String vmName) {
int retry = 3;
while (retry-- > 0) {
try {
Set<VM> vms = VM.getByNameLabel(conn, vmName);
for (final VM vm : vms) {
return convertToState(vm.getPowerState(conn));
return convertToPowerState(vm.getPowerState(conn));
}
} catch (final BadServerResponse e) {
// There is a race condition within xenserver such that if a vm is
@ -2727,22 +2645,19 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
}
return State.Stopped;
return PowerState.PowerOff;
}
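Assembled from the fragments above, the lookup retries a few times and then falls back to PowerOff: a VM that XenServer cannot find is treated as powered off rather than unknown. A consolidated sketch; the pause between attempts is an assumption, and only the retry counter, the XenAPI lookup, and the PowerOff fallback appear in this change:
protected PowerState getVmState(Connection conn, final String vmName) {
    int retry = 3;
    while (retry-- > 0) {
        try {
            Set<VM> vms = VM.getByNameLabel(conn, vmName);
            for (final VM vm : vms) {
                return convertToPowerState(vm.getPowerState(conn));
            }
        } catch (final BadServerResponse e) {
            // transient race inside XenServer; retry the lookup
            s_logger.warn("Unable to read power state for " + vmName, e);
        } catch (final XenAPIException e) {
            s_logger.warn("Unable to read power state for " + vmName, e);
        } catch (final XmlRpcException e) {
            s_logger.warn("Unable to read power state for " + vmName, e);
        }
    }
    // a VM that cannot be found is reported as powered off
    return PowerState.PowerOff;
}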
protected CheckVirtualMachineAnswer execute(final CheckVirtualMachineCommand cmd) {
Connection conn = getConnection();
final String vmName = cmd.getVmName();
final State state = getVmState(conn, vmName);
final PowerState powerState = getVmState(conn, vmName);
Integer vncPort = null;
if (state == State.Running) {
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, State.Running);
}
if (powerState == PowerState.PowerOn) {
s_logger.debug("3. The VM " + vmName + " is in Running state");
}
return new CheckVirtualMachineAnswer(cmd, state, vncPort);
return new CheckVirtualMachineAnswer(cmd, powerState, vncPort);
}
protected PrepareForMigrationAnswer execute(PrepareForMigrationCommand cmd) {
@ -2760,9 +2675,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
for (NicTO nic : nics) {
getNetwork(conn, nic);
}
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vm.getName(), State.Migrating);
}
s_logger.debug("4. The VM " + vm.getName() + " is in Migrating state");
return new PrepareForMigrationAnswer(cmd);
@ -3000,14 +2912,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
protected MigrateAnswer execute(final MigrateCommand cmd) {
Connection conn = getConnection();
final String vmName = cmd.getVmName();
State state = null;
state = s_vms.getState(_cluster, vmName);
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, State.Stopping);
}
s_logger.debug("5. The VM " + vmName + " is in Stopping state");
try {
Set<VM> vms = VM.getByNameLabel(conn, vmName);
@ -3037,54 +2942,16 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
migrateVM(conn, dsthost, vm, vmName);
vm.setAffinity(conn, dsthost);
state = State.Stopping;
}
return new MigrateAnswer(cmd, true, "migration succeeded", null);
} catch (Exception e) {
String msg = "Catch Exception " + e.getClass().getName() + ": Migration failed due to " + e.toString();
s_logger.warn(msg, e);
return new MigrateAnswer(cmd, false, msg, null);
} finally {
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, state);
}
s_logger.debug("6. The VM " + vmName + " is in " + state + " state");
}
}
protected State getRealPowerState(Connection conn, String label) {
int i = 0;
s_logger.trace("Checking on the HALTED State");
for (; i < 20; i++) {
try {
Set<VM> vms = VM.getByNameLabel(conn, label);
if (vms == null || vms.size() == 0) {
continue;
}
VM vm = vms.iterator().next();
VmPowerState vps = vm.getPowerState(conn);
if (vps != null && vps != VmPowerState.HALTED && vps != VmPowerState.UNRECOGNIZED) {
return convertToState(vps);
}
} catch (XenAPIException e) {
String msg = "Unable to get real power state due to " + e.toString();
s_logger.warn(msg, e);
} catch (XmlRpcException e) {
String msg = "Unable to get real power state due to " + e.getMessage();
s_logger.warn(msg, e);
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
return State.Stopped;
}
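getRealPowerState() disappears along with the state machine. Its job was to poll up to 20 times before believing HALTED, because XenServer can report HALTED transiently while a VM is changing state. The confirm-before-trust idea generalizes; an illustrative, self-contained sketch, not CloudStack API:
// Accept a suspicious reading only after it persists across several probes.
static <T> T confirmReading(java.util.concurrent.Callable<T> probe, T suspect,
        int attempts, long pauseMs) throws Exception {
    for (int i = 0; i < attempts; i++) {
        T value = probe.call();
        if (value != null && !value.equals(suspect)) {
            return value; // any non-suspect reading is trusted immediately
        }
        Thread.sleep(pauseMs);
    }
    return suspect; // the reading persisted, so accept it
}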
protected Pair<VM, VM.Record> getControlDomain(Connection conn) throws XenAPIException, XmlRpcException {
Host host = Host.getByUuid(conn, _host.uuid);
Set<VM> vms = null;
@ -3172,9 +3039,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
@Override
public RebootAnswer execute(RebootCommand cmd) {
Connection conn = getConnection();
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, cmd.getVmName(), State.Starting);
}
s_logger.debug("7. The VM " + cmd.getVmName() + " is in Starting state");
try {
Set<VM> vms = null;
@ -3198,9 +3062,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
return new RebootAnswer(cmd, "reboot succeeded", true);
} finally {
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, cmd.getVmName(), State.Running);
}
s_logger.debug("8. The VM " + cmd.getVmName() + " is in Running state");
}
}
@ -3342,7 +3203,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
waitForTask(conn, task, 1000, 10 * 60 * 1000);
checkForSuccess(conn, task);
} catch (Types.HandleInvalid e) {
if (vm.getPowerState(conn) == Types.VmPowerState.RUNNING) {
if (vm.getPowerState(conn) == VmPowerState.RUNNING) {
task = null;
return;
}
@ -3390,7 +3251,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
waitForTask(conn, task, 1000, 10 * 60 * 1000);
checkForSuccess(conn, task);
} catch (TimeoutException e) {
if (vm.getPowerState(conn) == Types.VmPowerState.HALTED) {
if (vm.getPowerState(conn) == VmPowerState.HALTED) {
task = null;
return;
}
@ -3399,19 +3260,19 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
} catch (XenAPIException e) {
s_logger.debug("Unable to cleanShutdown VM(" + vmName + ") on host(" + _host.uuid + ") due to " + e.toString());
try {
Types.VmPowerState state = vm.getPowerState(conn);
if (state == Types.VmPowerState.RUNNING) {
VmPowerState state = vm.getPowerState(conn);
if (state == VmPowerState.RUNNING) {
try {
vm.hardShutdown(conn);
} catch (Exception e1) {
s_logger.debug("Unable to hardShutdown VM(" + vmName + ") on host(" + _host.uuid + ") due to " + e.toString());
state = vm.getPowerState(conn);
if (state == Types.VmPowerState.RUNNING) {
if (state == VmPowerState.RUNNING) {
forceShutdownVM(conn, vm);
}
return;
}
} else if (state == Types.VmPowerState.HALTED) {
} else if (state == VmPowerState.HALTED) {
return;
} else {
String msg = "After cleanShutdown the VM status is " + state.toString() + ", that is not expected";
@ -3443,14 +3304,14 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
waitForTask(conn, task, 1000, 10 * 60 * 1000);
checkForSuccess(conn, task);
} catch (Types.HandleInvalid e) {
if (vm.getPowerState(conn) == Types.VmPowerState.RUNNING) {
if (vm.getPowerState(conn) == VmPowerState.RUNNING) {
s_logger.debug("VM " + vmName + " is in Running status");
task = null;
return;
}
throw new CloudRuntimeException("Start VM " + vmName + " catch HandleInvalid and VM is not in RUNNING state");
} catch (TimeoutException e) {
if (vm.getPowerState(conn) == Types.VmPowerState.RUNNING) {
if (vm.getPowerState(conn) == VmPowerState.RUNNING) {
s_logger.debug("VM " + vmName + " is in Running status");
task = null;
return;
@ -3593,10 +3454,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
if (vms.size() == 0) {
synchronized (_cluster.intern()) {
s_logger.info("VM does not exist on XenServer" + _host.uuid);
s_vms.remove(_cluster, _name, vmName);
}
return new StopAnswer(cmd, "VM does not exist", true);
}
for (VM vm : vms) {
@ -3620,11 +3477,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
return new StopAnswer(cmd, msg, false);
}
State state = s_vms.getState(_cluster, vmName);
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, State.Stopping);
}
s_logger.debug("9. The VM " + vmName + " is in Stopping state");
try {
@ -3668,7 +3520,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
networks.add(vif.getNetwork(conn));
}
vm.destroy(conn);
state = State.Stopped;
SR sr = getISOSRbyVmName(conn, cmd.getVmName());
removeSR(conn, sr);
// Disable any VLAN networks that aren't used
@ -3688,10 +3539,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
String msg = "VM destroy failed in Stop " + vmName + " Command due to " + e.getMessage();
s_logger.warn(msg, e);
} finally {
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, state);
}
s_logger.debug("10. The VM " + vmName + " is in " + state + " state");
s_logger.debug("10. The VM " + vmName + " is in Stopped state");
}
}
}
@ -4317,13 +4165,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
Connection conn = getConnection();
if (!_canBridgeFirewall && !_isOvs) {
return new PingRoutingCommand(getType(), id, null, getHostVmStateReport(conn));
return new PingRoutingCommand(getType(), id, getHostVmStateReport(conn));
} else if (_isOvs) {
List<Pair<String, Long>> ovsStates = ovsFullSyncStates();
return new PingRoutingWithOvsCommand(getType(), id, null, getHostVmStateReport(conn), ovsStates);
return new PingRoutingWithOvsCommand(getType(), id, getHostVmStateReport(conn), ovsStates);
} else {
HashMap<String, Pair<Long, Long>> nwGrpStates = syncNetworkGroups(conn, id);
return new PingRoutingWithNwGroupsCommand(getType(), id, null, getHostVmStateReport(conn), nwGrpStates);
return new PingRoutingWithNwGroupsCommand(getType(), id, getHostVmStateReport(conn), nwGrpStates);
}
} catch (Exception e) {
s_logger.warn("Unable to get current status", e);
@ -4581,7 +4429,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
cmd.setHypervisorType(HypervisorType.XenServer);
cmd.setCluster(_cluster);
cmd.setPoolSync(false);
cmd.setHostVmStateReport(getHostVmStateReport(conn));
Pool pool;
try {
@ -4589,10 +4436,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
Pool.Record poolr = pool.getRecord(conn);
Host.Record hostr = poolr.master.getRecord(conn);
if (_host.uuid.equals(hostr.uuid)) {
HashMap<String, Pair<String, State>> allStates = fullClusterSync(conn);
cmd.setClusterVMStateChanges(allStates);
}
} catch (Throwable e) {
s_logger.warn("Check for master failed, failing the FULL Cluster sync command");
}
@ -6411,7 +6254,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
String vmName = cmd.getVmName();
String vmSnapshotName = cmd.getTarget().getSnapshotName();
List<VolumeObjectTO> listVolumeTo = cmd.getVolumeTOs();
VirtualMachine.State vmState = cmd.getVmState();
VmPowerState vmState = VmPowerState.HALTED;
String guestOSType = cmd.getGuestOSType();
String platformEmulator = cmd.getPlatformEmulator();
@ -6446,6 +6289,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
if (task == null) {
try {
vm = getVM(conn, vmName);
vmState = vm.getPowerState(conn);
} catch (Exception e) {
if (!snapshotMemory) {
vm = createWorkingVM(conn, vmName, guestOSType, platformEmulator, listVolumeTo);
@ -6525,7 +6369,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
vmSnapshot.destroy(conn);
}
}
if (vmState == VirtualMachine.State.Stopped) {
if (vmState == VmPowerState.HALTED) {
if (vm != null) {
vm.destroy(conn);
}
@ -7193,146 +7037,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
return vmMetaDatum;
}
protected HashMap<String, Pair<String, State>> fullClusterSync(Connection conn) {
synchronized (_cluster.intern()) {
s_vms.clear(_cluster);
}
try {
Map<VM, VM.Record> vm_map = VM.getAllRecords(conn); //USE THIS TO GET ALL VMS FROM A CLUSTER
for (VM.Record record : vm_map.values()) {
if (record.isControlDomain || record.isASnapshot || record.isATemplate) {
continue; // Skip DOM0
}
String vm_name = record.nameLabel;
VmPowerState ps = record.powerState;
final State state = convertToState(ps);
Host host = record.residentOn;
String host_uuid = null;
if (!isRefNull(host)) {
host_uuid = host.getUuid(conn);
synchronized (_cluster.intern()) {
s_vms.put(_cluster, host_uuid, vm_name, state);
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm_name + ": powerstate = " + ps + "; vm state=" + state.toString());
}
}
} catch (final Throwable e) {
String msg = "Unable to get vms through host " + _host.uuid + " due to to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg);
}
return s_vms.getClusterVmState(_cluster);
}
protected HashMap<String, Pair<String, State>> deltaClusterSync(Connection conn) {
final HashMap<String, Pair<String, State>> changes = new HashMap<String, Pair<String, State>>();
synchronized (_cluster.intern()) {
HashMap<String, Pair<String, State>> newStates = getAllVms(conn);
if (newStates == null) {
s_logger.warn("Unable to get the vm states so no state sync at this point.");
return null;
}
HashMap<String, Pair<String, State>> oldStates = new HashMap<String, Pair<String, State>>(s_vms.size(_cluster));
oldStates.putAll(s_vms.getClusterVmState(_cluster));
for (final Map.Entry<String, Pair<String, State>> entry : newStates.entrySet()) {
final String vm = entry.getKey();
State newState = entry.getValue().second();
String host_uuid = entry.getValue().first();
final Pair<String, State> oldState = oldStates.remove(vm);
//check if host is changed
if (host_uuid != null && oldState != null) {
if (!host_uuid.equals(oldState.first()) && newState != State.Stopped && newState != State.Stopping) {
s_logger.warn("Detecting a change in host for " + vm);
changes.put(vm, new Pair<String, State>(host_uuid, newState));
s_logger.debug("11. The VM " + vm + " is in " + newState + " state");
s_vms.put(_cluster, host_uuid, vm, newState);
continue;
}
}
if (newState == State.Stopped && oldState != null && oldState.second() != State.Stopping && oldState.second() != State.Stopped) {
newState = getRealPowerState(conn, vm);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + ": xenserver has state " + newState + " and we have state " + (oldState != null ? oldState.toString() : "null"));
}
if (vm.startsWith("migrating")) {
s_logger.warn("Migrating from xenserver detected. Skipping");
continue;
}
if (oldState == null) {
s_vms.put(_cluster, host_uuid, vm, newState);
s_logger.warn("Detecting a new state but couldn't find a old state so adding it to the changes: " + vm);
changes.put(vm, new Pair<String, State>(host_uuid, newState));
} else if (oldState.second() == State.Starting) {
if (newState == State.Running) {
s_logger.debug("12. The VM " + vm + " is in " + State.Running + " state");
s_vms.put(_cluster, host_uuid, vm, newState);
} else if (newState == State.Stopped) {
s_logger.warn("Ignoring vm " + vm + " because of a lag in starting the vm.");
}
} else if (oldState.second() == State.Migrating) {
if (newState == State.Running) {
s_logger.debug("Detected that an migrating VM is now running: " + vm);
s_vms.put(_cluster, host_uuid, vm, newState);
}
} else if (oldState.second() == State.Stopping) {
if (newState == State.Stopped) {
s_logger.debug("13. The VM " + vm + " is in " + State.Stopped + " state");
s_vms.put(_cluster, host_uuid, vm, newState);
} else if (newState == State.Running) {
s_logger.warn("Ignoring vm " + vm + " because of a lag in stopping the vm. ");
}
} else if (oldState.second() != newState) {
s_logger.debug("14. The VM " + vm + " is in " + newState + " state was " + oldState.second());
s_vms.put(_cluster, host_uuid, vm, newState);
if (newState == State.Stopped) {
/*
* if (s_vmsKilled.remove(vm)) { s_logger.debug("VM " + vm + " has been killed for storage. ");
* newState = State.Error; }
*/
}
changes.put(vm, new Pair<String, State>(host_uuid, newState));
}
}
for (final Map.Entry<String, Pair<String, State>> entry : oldStates.entrySet()) {
final String vm = entry.getKey();
final State oldState = entry.getValue().second();
String host_uuid = entry.getValue().first();
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + " is now missing from xenserver so reporting stopped");
}
if (oldState == State.Stopping) {
s_logger.warn("Ignoring VM " + vm + " in transition state stopping.");
s_vms.remove(_cluster, host_uuid, vm);
} else if (oldState == State.Starting) {
s_logger.warn("Ignoring VM " + vm + " in transition state starting.");
} else if (oldState == State.Stopped) {
s_logger.debug("VM missing " + vm + " old state stopped so removing.");
s_vms.remove(_cluster, host_uuid, vm);
} else if (oldState == State.Migrating) {
s_logger.warn("Ignoring VM " + vm + " in migrating state.");
} else {
State newState = State.Stopped;
s_logger.warn("The VM is now missing marking it as Stopped " + vm);
changes.put(vm, new Pair<String, State>(host_uuid, newState));
}
}
}
return changes;
}
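Both fullClusterSync() and deltaClusterSync() go away with the agent-side state machine. Stripped of the XenServer specifics, the removed delta pass is a plain diff of a freshly observed state map against a cached one; a generic, illustrative sketch (names are not CloudStack API):
// Emit only the differences between the cached map and a fresh observation;
// anything no longer observed is reported with the supplied absent state.
static <S> Map<String, S> diffStates(Map<String, S> cached, Map<String, S> observed, S absentState) {
    Map<String, S> changes = new HashMap<String, S>();
    Map<String, S> old = new HashMap<String, S>(cached);
    for (Map.Entry<String, S> e : observed.entrySet()) {
        S prev = old.remove(e.getKey());
        if (prev == null || !prev.equals(e.getValue())) {
            changes.put(e.getKey(), e.getValue());
        }
    }
    for (String missing : old.keySet()) {
        changes.put(missing, absentState);
    }
    return changes;
}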
/**
* @param cmd
* @return

View File

@ -101,9 +101,6 @@ public class XenServer56FP1Resource extends XenServer56Resource {
vdis.add(vdi);
}
}
synchronized (_cluster.intern()) {
s_vms.remove(_cluster, _name, vm.getNameLabel(conn));
}
s_logger.info("Fence command for VM " + cmd.getVmName());
vm.powerStateReset(conn);
vm.destroy(conn);

View File

@ -261,9 +261,6 @@ public class XenServer56Resource extends CitrixResourceBase {
}
Set<VM> vms = VM.getByNameLabel(conn, cmd.getVmName());
for (VM vm : vms) {
synchronized (_cluster.intern()) {
s_vms.remove(_cluster, _name, vm.getNameLabel(conn));
}
s_logger.info("Fence command for VM " + cmd.getVmName());
vm.powerStateReset(conn);
vm.destroy(conn);

View File

@ -62,7 +62,6 @@ import com.cloud.network.Networks.TrafficType;
import com.cloud.resource.ServerResource;
import com.cloud.storage.Volume;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine.State;
@Local(value = ServerResource.class)
public class XenServer610Resource extends XenServer602Resource {
@ -143,13 +142,8 @@ public class XenServer610Resource extends XenServer602Resource {
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
Map<VolumeTO, StorageFilerTO> volumeToFiler = cmd.getVolumeToFiler();
final String vmName = vmSpec.getName();
State state = s_vms.getState(_cluster, vmName);
Task task = null;
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, State.Stopping);
}
try {
prepareISO(connection, vmSpec.getName());
Map<String, String> other = new HashMap<String, String>();
@ -196,8 +190,6 @@ public class XenServer610Resource extends XenServer602Resource {
// Volume paths would have changed. Return that information.
List<VolumeObjectTO> volumeToList = getUpdatedVolumePathsOfMigratedVm(connection, vmToMigrate, vmSpec.getDisks());
vmToMigrate.setAffinity(connection, host);
state = State.Stopping;
return new MigrateWithStorageAnswer(cmd, volumeToList);
} catch (Exception e) {
s_logger.warn("Catch Exception " + e.getClass().getName() + ". Storage motion failed due to " + e.toString(), e);
@ -210,10 +202,6 @@ public class XenServer610Resource extends XenServer602Resource {
s_logger.debug("Unable to destroy task " + task.toString() + " on host " + _host.uuid + " due to " + e.toString());
}
}
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, state);
}
}
}
@ -260,15 +248,9 @@ public class XenServer610Resource extends XenServer602Resource {
Map<NicTO, Object> nicToNetwork = cmd.getNicToNetwork();
Map<String, String> token = cmd.getToken();
final String vmName = vmSpec.getName();
State state = s_vms.getState(_cluster, vmName);
Set<VolumeTO> volumeToSet = null;
boolean migrated = false;
Task task = null;
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, State.Stopping);
}
try {
Set<VM> vms = VM.getByNameLabel(connection, vmSpec.getName());
VM vmToMigrate = vms.iterator().next();
@ -339,15 +321,6 @@ public class XenServer610Resource extends XenServer602Resource {
s_logger.debug("Unable to destroy task " + task.toString() + " on host " + _host.uuid + " due to " + e.toString());
}
}
// Keep cluster/vm sync happy.
synchronized (_cluster.intern()) {
if (migrated) {
s_vms.remove(_cluster, _name, vmName);
} else {
s_vms.put(_cluster, _name, vmName, state);
}
}
}
}
@ -369,10 +342,6 @@ public class XenServer610Resource extends XenServer602Resource {
List<VolumeObjectTO> volumeToSet = getUpdatedVolumePathsOfMigratedVm(connection, migratedVm, vmSpec.getDisks());
migratedVm.setAffinity(connection, host);
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmSpec.getName(), State.Running);
}
return new MigrateWithStorageCompleteAnswer(cmd, volumeToSet);
} catch (CloudRuntimeException e) {
s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e);

View File

@ -1,92 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.xenserver.resource;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.log4j.Logger;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine.State;
public class XenServerPoolVms {
private static final Logger s_logger = Logger.getLogger(XenServerPoolVms.class);
private final Map<String/* clusterId */, HashMap<String/* vm name */, Pair<String/* host uuid */, State/* vm state */>>> _clusterVms =
new ConcurrentHashMap<String, HashMap<String, Pair<String, State>>>();
public HashMap<String, Pair<String, State>> getClusterVmState(String clusterId) {
HashMap<String, Pair<String, State>> _vms = _clusterVms.get(clusterId);
if (_vms == null) {
HashMap<String, Pair<String, State>> vmStates = new HashMap<String, Pair<String, State>>();
_clusterVms.put(clusterId, vmStates);
return vmStates;
} else
return _vms;
}
public void clear(String clusterId) {
HashMap<String, Pair<String, State>> _vms = getClusterVmState(clusterId);
_vms.clear();
}
public State getState(String clusterId, String name) {
HashMap<String, Pair<String, State>> vms = getClusterVmState(clusterId);
Pair<String, State> pv = vms.get(name);
return pv == null ? State.Stopped : pv.second(); // if a VM is absent on the cluster, it is effectively in stopped state.
}
public Pair<String, State> get(String clusterId, String name) {
HashMap<String, Pair<String, State>> vms = getClusterVmState(clusterId);
return vms.get(name);
}
public void put(String clusterId, String hostUuid, String name, State state) {
HashMap<String, Pair<String, State>> vms = getClusterVmState(clusterId);
vms.put(name, new Pair<String, State>(hostUuid, state));
}
public void remove(String clusterId, String hostUuid, String name) {
HashMap<String, Pair<String, State>> vms = getClusterVmState(clusterId);
vms.remove(name);
}
public void putAll(String clusterId, HashMap<String, Pair<String, State>> newVms) {
HashMap<String, Pair<String, State>> vms = getClusterVmState(clusterId);
vms.putAll(newVms);
}
public int size(String clusterId) {
HashMap<String, Pair<String, State>> vms = getClusterVmState(clusterId);
return vms.size();
}
@Override
public String toString() {
StringBuilder sbuf = new StringBuilder("PoolVms=");
for (HashMap<String/* vm name */, Pair<String/* host uuid */, State/* vm state */>> clusterVM : _clusterVms.values()) {
for (Map.Entry<String,Pair<String,State>> entry: clusterVM.entrySet()) {
String vmname = entry.getKey();
Pair<String,State> vmstate= entry.getValue();
sbuf.append(vmname).append("-").append(vmstate.second()).append(",");
}
}
return sbuf.toString();
}
}
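The deleted XenServerPoolVms wrapped a ConcurrentHashMap of clusters around plain HashMaps of VMs, which is why every call site above had to guard access with synchronized (_cluster.intern()). Had the cache survived, a fully concurrent layout would have removed the need for those external locks; an illustrative sketch only, using the Java 6/7 putIfAbsent idiom:
private final ConcurrentMap<String, ConcurrentMap<String, Pair<String, PowerState>>> clusterVms =
        new ConcurrentHashMap<String, ConcurrentMap<String, Pair<String, PowerState>>>();

// Fetch-or-create the per-cluster map without external locking.
ConcurrentMap<String, Pair<String, PowerState>> vmsOf(String clusterId) {
    ConcurrentMap<String, Pair<String, PowerState>> vms = clusterVms.get(clusterId);
    if (vms == null) {
        ConcurrentMap<String, Pair<String, PowerState>> fresh =
                new ConcurrentHashMap<String, Pair<String, PowerState>>();
        ConcurrentMap<String, Pair<String, PowerState>> prev = clusterVms.putIfAbsent(clusterId, fresh);
        vms = prev == null ? fresh : prev;
    }
    return vms;
}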

View File

@ -40,7 +40,6 @@ import com.cloud.hypervisor.xenserver.resource.XenServer620SP1Resource;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineName;
/**
*
@ -205,99 +204,7 @@ public class XenServerResourceNewBase extends XenServer620SP1Resource {
}
protected void recordChanges(Connection conn, VM.Record rec, String hostUuid) {
String vm = rec.nameLabel;
if (!VirtualMachineName.isValidCloudStackVmName(vm, _instance)) {
s_logger.debug("Skipping over VMs that does not conform to CloudStack naming convention: " + vm);
return;
}
VirtualMachine.State currentState = convertToState(rec.powerState);
if (vm.startsWith("migrating")) {
s_logger.warn("Skipping " + vm + " because it is migrating.");
return;
}
if (currentState == VirtualMachine.State.Stopped) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Double check the power state to make sure we got the correct state for " + vm);
}
currentState = getRealPowerState(conn, vm);
}
boolean updateMap = false;
boolean reportChange = false;
// NOTE: For now we only record a change when the VM is stopped. We don't detect VMs starting yet.
synchronized (_cluster.intern()) {
Pair<String, VirtualMachine.State> oldState = s_vms.get(_cluster, vm);
if (oldState == null) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Unable to find " + vm + " from previous map. Assuming it was in Stopped state.");
}
oldState = new Pair<String, VirtualMachine.State>(null, VirtualMachine.State.Stopped);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace(vm + ": current state=" + currentState + ", previous state=" + oldState);
}
if (oldState.second() == VirtualMachine.State.Starting) {
if (currentState == VirtualMachine.State.Running) {
updateMap = true;
reportChange = false;
} else if (currentState == VirtualMachine.State.Stopped) {
updateMap = false;
reportChange = false;
}
} else if (oldState.second() == VirtualMachine.State.Migrating) {
updateMap = true;
reportChange = false;
} else if (oldState.second() == VirtualMachine.State.Stopping) {
if (currentState == VirtualMachine.State.Stopped) {
updateMap = true;
reportChange = false;
} else if (currentState == VirtualMachine.State.Running) {
updateMap = false;
reportChange = false;
}
} else if (oldState.second() != currentState) {
updateMap = true;
reportChange = true;
} else if (hostUuid != null && !hostUuid.equals(oldState.first())) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Detecting " + vm + " moved from " + oldState.first() + " to " + hostUuid);
}
reportChange = true;
updateMap = true;
}
if (updateMap) {
s_vms.put(_cluster, hostUuid, vm, currentState);
if (s_logger.isTraceEnabled()) {
s_logger.trace("Updated " + vm + " to [" + hostUuid + ", " + currentState);
}
}
if (reportChange) {
Pair<String, VirtualMachine.State> change = _changes.get(vm);
if (hostUuid == null) {
// This is really strange code. It looks like the sync
// code wants this to be set, which is extremely weird
// for VMs that are dead. Why would we want to set the
// hostUuid if the VM is stopped?
hostUuid = oldState.first();
if (hostUuid == null) {
hostUuid = _host.uuid;
}
}
if (change == null) {
change = new Pair<String, VirtualMachine.State>(hostUuid, currentState);
} else {
change.first(hostUuid);
change.second(currentState);
}
_changes.put(vm, change);
}
}
}
@Override

View File

@ -30,7 +30,7 @@ import com.cloud.host.Host;
import com.cloud.host.Status;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachine.PowerState;
@Local(value = Investigator.class)
public class CheckOnAgentInvestigator extends AdapterBase implements Investigator {
@ -57,7 +57,7 @@ public class CheckOnAgentInvestigator extends AdapterBase implements Investigato
}
s_logger.debug("Agent responded with state " + answer.getState().toString());
return answer.getState() == State.Running;
return answer.getState() == PowerState.PowerOn;
} catch (AgentUnavailableException e) {
s_logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage());
return null;
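The investigator keeps its tri-state contract: Boolean.TRUE when the agent reports PowerOn, Boolean.FALSE for any other reported state, and null when the agent cannot be reached so the next investigator can decide. Condensed from the fragments above; the method signature and the helper that sends the command are assumptions:
public Boolean isVmAlive(VirtualMachine vm, Host host) {
    try {
        // hypothetical helper wrapping the CheckVirtualMachineCommand round trip
        CheckVirtualMachineAnswer answer = sendCheck(vm, host);
        return answer.getState() == PowerState.PowerOn;
    } catch (AgentUnavailableException e) {
        return null; // unknown; defer to another investigator
    }
}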

View File

@ -23,11 +23,9 @@ import javax.naming.ConfigurationException;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.HostVmStateReportEntry;
import com.cloud.agent.api.PingCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StartupRoutingCommand.VmState;
import com.cloud.host.Host.Type;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.resource.ServerResource;
@ -49,8 +47,7 @@ public class KvmDummyResourceBase extends ServerResourceBase implements ServerRe
@Override
public StartupCommand[] initialize() {
StartupRoutingCommand cmd =
new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.KVM, new HashMap<String, String>(), new HashMap<String, VmState>(),
new HashMap<String, HostVmStateReportEntry>());
new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.KVM, new HashMap<String, String>());
cmd.setDataCenter(_zoneId);
cmd.setPod(_podId);
cmd.setCluster(_clusterId);

View File

@ -31,7 +31,6 @@ import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.host.Host.Type;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.net.MacAddress;
import com.cloud.vm.VirtualMachine;
public class DummyHostServerResource extends ServerResourceBase {
@ -57,8 +56,7 @@ public class DummyHostServerResource extends ServerResourceBase {
@Override
public PingCommand getCurrentStatus(long id) {
HashMap<String, VirtualMachine.State> newStates = new HashMap<String, VirtualMachine.State>();
return new PingRoutingCommand(com.cloud.host.Host.Type.Routing, id, newStates, new HashMap<String, HostVmStateReportEntry>());
return new PingRoutingCommand(com.cloud.host.Host.Type.Routing, id, new HashMap<String, HostVmStateReportEntry>());
}
@Override

View File

@ -220,10 +220,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);
// TODO
static final ConfigKey<Boolean> VmJobEnabled = new ConfigKey<Boolean>("Advanced",
Boolean.class, "vm.job.enabled", "true",
"True to enable new VM sync model. false to use the old way", false);
static final ConfigKey<Long> VmJobCheckInterval = new ConfigKey<Long>("Advanced", Long.class, "vm.job.check.interval", "3000",
"Interval in milliseconds to check if the job is complete", false);
@ -898,22 +894,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(userVm.getId());
}
placeHolder = createPlaceHolderWork(userVm.getId());
try {
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops,
newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
} finally {
if (VmJobEnabled.value()) {
_workJobDao.expunge(placeHolder.getId());
}
_workJobDao.expunge(placeHolder.getId());
}
} else {
Outcome<Volume> outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops,
@ -1328,18 +1320,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vmId);
}
placeHolder = createPlaceHolderWork(vmId);
try {
return orchestrateAttachVolumeToVM(vmId, volumeId, deviceId);
} finally {
if (VmJobEnabled.value())
_workJobDao.expunge(placeHolder.getId());
_workJobDao.expunge(placeHolder.getId());
}
} else {
@ -1536,17 +1525,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vmId);
}
placeHolder = createPlaceHolderWork(vmId);
try {
return orchestrateDetachVolumeFromVM(vmId, volumeId);
} finally {
if (VmJobEnabled.value())
_workJobDao.expunge(placeHolder.getId());
_workJobDao.expunge(placeHolder.getId());
}
} else {
Outcome<Volume> outcome = detachVolumeFromVmThroughJobQueue(vmId, volumeId);
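The else branch hands the work to the job queue and waits on the returned Outcome. A hedged sketch of how that wait presumably completes; only the queue call itself appears in this change, and the blocking get() with its exception surface is an assumption:
try {
    // block until the queued VM work job finishes and yields the volume
    return outcome.get();
} catch (InterruptedException e) {
    throw new RuntimeException("Operation was interrupted", e);
} catch (java.util.concurrent.ExecutionException e) {
    throw new RuntimeException("Execution exception", e);
}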
@ -1733,18 +1719,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (vm != null) {
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vm.getId());
}
placeHolder = createPlaceHolderWork(vm.getId());
try {
return orchestrateMigrateVolume(vol.getId(), destPool.getId(), liveMigrateVolume);
} finally {
if ((VmJobEnabled.value())&&(placeHolder != null))
_workJobDao.expunge(placeHolder.getId());
_workJobDao.expunge(placeHolder.getId());
}
} else {
@ -1835,18 +1818,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (vm != null) {
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vm.getId());
}
placeHolder = createPlaceHolderWork(vm.getId());
try {
return orchestrateTakeVolumeSnapshot(volumeId, policyId, snapshotId, account, quiescevm);
} finally {
if (VmJobEnabled.value())
_workJobDao.expunge(placeHolder.getId());
_workJobDao.expunge(placeHolder.getId());
}
} else {

View File

@ -134,10 +134,6 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana
int _vmSnapshotMax;
int _wait;
// TODO
static final ConfigKey<Boolean> VmJobEnabled = new ConfigKey<Boolean>("Advanced",
Boolean.class, "vm.job.enabled", "true",
"True to enable new VM sync model. false to use the old way", false);
static final ConfigKey<Long> VmJobCheckInterval = new ConfigKey<Long>("Advanced",
Long.class, "vm.job.check.interval", "3000",
"Interval in milliseconds to check if the job is complete", false);
@ -368,17 +364,14 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vmId);
}
placeHolder = createPlaceHolderWork(vmId);
try {
return orchestrateCreateVMSnapshot(vmId, vmSnapshotId, quiescevm);
} finally {
if (VmJobEnabled.value())
_workJobDao.expunge(placeHolder.getId());
_workJobDao.expunge(placeHolder.getId());
}
} else {
@ -466,17 +459,14 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vmSnapshot.getVmId());
}
placeHolder = createPlaceHolderWork(vmSnapshot.getVmId());
try {
return orchestrateDeleteVMSnapshot(vmSnapshotId);
} finally {
if (VmJobEnabled.value())
_workJobDao.expunge(placeHolder.getId());
_workJobDao.expunge(placeHolder.getId());
}
} else {
Outcome<VMSnapshot> outcome = deleteVMSnapshotThroughJobQueue(vmSnapshot.getVmId(), vmSnapshotId);
@ -581,18 +571,15 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vmSnapshotVo.getVmId());
}
placeHolder = createPlaceHolderWork(vmSnapshotVo.getVmId());
try {
return orchestrateRevertToVMSnapshot(vmSnapshotId);
} finally {
if (VmJobEnabled.value())
_workJobDao.expunge(placeHolder.getId());
_workJobDao.expunge(placeHolder.getId());
}
} else {
@ -718,16 +705,14 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana
public boolean deleteAllVMSnapshots(long vmId, VMSnapshot.Type type) {
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
if (VmJobEnabled.value()) {
placeHolder = createPlaceHolderWork(vmId);
}
placeHolder = createPlaceHolderWork(vmId);
try {
return orchestrateDeleteAllVMSnapshots(vmId, type);
} finally {
if ( (VmJobEnabled.value()) && (placeHolder != null))
if (placeHolder != null)
_workJobDao.expunge(placeHolder.getId());
}