proxy agent

This commit is contained in:
Alex Huang 2011-05-17 07:12:58 -07:00
parent a19e5284d0
commit ea9703997b
16 changed files with 194 additions and 810 deletions

View File

@ -30,7 +30,6 @@ public enum Status {
Updating(true, true, false),
PrepareForMaintenance(false, false, false),
ErrorInMaintenance(false, false, false),
CancelMaintenance(false, false, false),
Maintenance(false, false, false),
Alert(true, true, true),
Removed(true, false, true);

View File

@ -80,9 +80,9 @@ public interface ResourceService {
* @return true if deleted, false otherwise
*/
boolean deleteHost(long hostId, boolean isForced);
boolean updateHostPassword(UpdateHostPasswordCmd upasscmd);
Host getHost(long hostId);
Cluster getCluster(Long clusterId);

View File

@ -32,8 +32,10 @@ public interface Volume extends ControlledEntity, BasedOn {
};
enum State implements FiniteState<State, Event> {
Allocated("The volume is allocated but has not been created yet."), Creating("The volume is being created. getPoolId() should reflect the pool where it is being created."), Ready(
"The volume is ready to be used."), Destroy("The volume is destroyed, and can't be recovered.");
Allocated("The volume is allocated but has not been created yet."),
Creating("The volume is being created. getPoolId() should reflect the pool where it is being created."),
Ready("The volume is ready to be used."),
Destroy("The volume is destroyed, and can't be recovered.");
String _description;

View File

@ -279,6 +279,7 @@
-->
<classpath refid="test.classpath"/>
<jvmarg value="${debug.jvmarg}"/>
<jvmarg value="-ea"/>
<batchtest todir="${unittest.dir}">
<formatter type="plain"/>
<fileset dir="${utils.test.dir}">
@ -301,5 +302,36 @@
<fail if="junit.failure" message="Unit test(s) failed. See reports!"/>
</target>
<target name="test-suspend" description="Execute one unit test" depends="compile-tests">
    <junit fork="true" printsummary="true" showoutput="true" failureproperty="junit.failure">
        <!-- N.b. use failureproperty instead of haltonfailure, because if we use
             the former, we will get no detailed report about the failure.
             If the test fails, the fail element below will still assure that
             the Ant run will exit with error status.
        -->
        <classpath refid="test.classpath"/>
        <!-- Suspend the JVM and wait for a debugger to attach on port 8787. -->
        <jvmarg value="-Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=y"/>
        <jvmarg value="${assertion}"/>
        <!-- formatter must be a direct child of <junit>, not of <batchtest> -->
        <formatter type="plain"/>
        <batchtest todir="${unittest.dir}">
            <fileset dir="${utils.test.dir}">
                <include name="**/${test}.java"/>
            </fileset>
            <fileset dir="${server.test.dir}">
                <include name="**/${test}.java"/>
            </fileset>
        </batchtest>
    </junit>
    <!-- junitreport and fail are sibling tasks of <junit>; they are not valid nested
         elements of it, and were previously duplicated both inside and after it. -->
    <junitreport todir="${unittest.dir}">
        <fileset dir="${unittest.dir}"/>
        <report todir="${unittest.dir}/test-reports"/>
    </junitreport>
    <fail if="junit.failure" message="Unit test(s) failed. See reports!"/>
</target>
</project>

View File

@ -25,17 +25,11 @@ import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.manager.AgentAttache;
import com.cloud.agent.manager.Commands;
import com.cloud.api.commands.AddClusterCmd;
import com.cloud.api.commands.AddHostCmd;
import com.cloud.api.commands.AddSecondaryStorageCmd;
import com.cloud.api.commands.DeleteClusterCmd;
import com.cloud.api.commands.UpdateHostPasswordCmd;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.PodCluster;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.host.Host.Type;
@ -44,13 +38,10 @@ import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.host.Status.Event;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
import com.cloud.resource.ServerResource;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.User;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.component.Manager;
@ -247,15 +238,6 @@ public interface AgentManager extends Manager {
*/
boolean cancelMaintenance(long hostId);
/**
* Check to see if a virtual machine can be upgraded to the given service offering
*
* @param vm
* @param offering
* @return true if the host can handle the upgrade, false otherwise
*/
boolean isVirtualMachineUpgradable(final UserVm vm, final ServiceOffering offering);
public boolean executeUserRequest(long hostId, Event event) throws AgentUnavailableException;
public boolean reconnect(final long hostId) throws AgentUnavailableException;
@ -280,19 +262,4 @@ public interface AgentManager extends Manager {
void updateStatus(HostVO host, Event event);
List<? extends Cluster> discoverCluster(AddClusterCmd cmd) throws IllegalArgumentException, DiscoveryException;
Cluster getCluster(Long clusterId);
Cluster updateCluster(Cluster clusterToUpdate, String clusterType, String hypervisor, String allocationState);
boolean deleteCluster(DeleteClusterCmd cmd);
List<HostVO> discoverHosts(Long dcId, Long podId, Long clusterId, String clusterName, String url, String username, String password, String hypervisorType, List<String> hostTags)
throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException;
List<? extends Host> discoverHosts(com.cloud.api.commands.AddSecondaryStorageCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException;
List<? extends Host> discoverHosts(AddHostCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException;
}

View File

@ -19,9 +19,6 @@ package com.cloud.agent.manager;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.Enumeration;
@ -73,16 +70,11 @@ import com.cloud.agent.api.StartupStorageCommand;
import com.cloud.agent.api.StartupTrafficMonitorCommand;
import com.cloud.agent.api.UnsupportedAnswer;
import com.cloud.agent.api.UpdateHostPasswordCommand;
import com.cloud.agent.manager.allocator.HostAllocator;
import com.cloud.agent.manager.allocator.PodAllocator;
import com.cloud.agent.transport.Request;
import com.cloud.agent.transport.Response;
import com.cloud.alert.AlertManager;
import com.cloud.api.ApiConstants;
import com.cloud.api.commands.AddClusterCmd;
import com.cloud.api.commands.AddHostCmd;
import com.cloud.api.commands.AddSecondaryStorageCmd;
import com.cloud.api.commands.DeleteClusterCmd;
import com.cloud.api.commands.UpdateHostPasswordCmd;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
@ -101,14 +93,10 @@ import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.DataCenterIpAddressDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.event.dao.EventDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.ConnectionException;
import com.cloud.exception.DiscoveredWithErrorException;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.UnsupportedVersionException;
import com.cloud.ha.HighAvailabilityManager;
import com.cloud.ha.HighAvailabilityManager.WorkType;
@ -123,38 +111,25 @@ import com.cloud.host.Status.Event;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.host.dao.HostTagsDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.kvm.resource.KvmDummyResourceBase;
import com.cloud.network.IPAddressVO;
import com.cloud.network.dao.IPAddressDao;
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
import com.cloud.org.Grouping;
import com.cloud.resource.Discoverer;
import com.cloud.resource.ServerResource;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolVO;
import com.cloud.storage.VMTemplateHostVO;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VMTemplateHostDao;
import com.cloud.storage.resource.DummySecondaryStorageResource;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.User;
import com.cloud.user.UserContext;
import com.cloud.uservm.UserVm;
import com.cloud.utils.ActionDelegate;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.UriUtils;
import com.cloud.utils.component.Adapters;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.component.Inject;
@ -213,11 +188,9 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
protected IPAddressDao _publicIPAddressDao = null;
@Inject
protected HostPodDao _podDao = null;
protected Adapters<HostAllocator> _hostAllocators = null;
@Inject(adapter = PodAllocator.class)
protected Adapters<PodAllocator> _podAllocators = null;
@Inject
protected EventDao _eventDao = null;
@Inject
protected VMInstanceDao _vmDao = null;
@Inject
protected CapacityDao _capacityDao = null;
@ -228,8 +201,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
@Inject
protected StoragePoolHostDao _storagePoolHostDao = null;
@Inject
protected GuestOSCategoryDao _guestOSCategoryDao = null;
@Inject
protected HostDetailsDao _hostDetailsDao = null;
@Inject
protected ClusterDao _clusterDao = null;
@ -238,7 +209,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
@Inject
protected HostTagsDao _hostTagsDao = null;
protected Adapters<Discoverer> _discoverers = null;
protected int _port;
@Inject
@ -246,9 +216,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
@Inject
protected AlertManager _alertMgr = null;
@Inject
protected StorageManager _storageMgr = null;
@Inject
protected AccountManager _accountMgr = null;
@ -273,9 +240,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
protected ExecutorService _executor;
@Inject
protected VMTemplateHostDao _vmTemplateHostDao;
@Override
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
_name = name;
@ -315,18 +279,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
_instance = "DEFAULT";
}
_hostAllocators = locator.getAdapters(HostAllocator.class);
if (_hostAllocators == null || !_hostAllocators.isSet()) {
throw new ConfigurationException("Unable to find an host allocator.");
}
_podAllocators = locator.getAdapters(PodAllocator.class);
if (_podAllocators == null || !_podAllocators.isSet()) {
throw new ConfigurationException("Unable to find an pod allocator.");
}
_discoverers = locator.getAdapters(Discoverer.class);
_nodeId = ManagementServerNode.getManagementServerId();
_hostDao.markHostsAsDisconnected(_nodeId);
@ -518,498 +470,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
return attache;
}
/**
 * Discovers and registers a new cluster described by an {@link AddClusterCmd}.
 *
 * Validates the zone and pod, resolves hypervisor / cluster-type / allocation-state,
 * persists the new ClusterVO, and — for externally managed cluster types — runs the
 * matching Discoverer against the supplied URL and simulates agent start for every
 * discovered resource. If discovery fails after the cluster row was created, the
 * finally block deletes the cluster row and its details again.
 *
 * @param cmd user-supplied cluster parameters (zone, pod, name, URL, credentials, ...)
 * @return a list containing the single newly created cluster
 * @throws DiscoveryException if the external cluster cannot be discovered
 */
@Override
public List<? extends Cluster> discoverCluster(AddClusterCmd cmd) throws IllegalArgumentException, DiscoveryException {
    Long dcId = cmd.getZoneId();
    Long podId = cmd.getPodId();
    String clusterName = cmd.getClusterName();
    String url = cmd.getUrl();
    String username = cmd.getUsername();
    String password = cmd.getPassword();
    if(url != null) {
        // NOTE(review): the one-argument URLDecoder.decode is deprecated and decodes
        // with the platform default charset — consider decode(url, "UTF-8"); confirm.
        url = URLDecoder.decode(url);
    }
    URI uri = null;
    // Check if the zone exists in the system
    DataCenterVO zone = _dcDao.findById(dcId);
    if (zone == null) {
        throw new InvalidParameterValueException("Can't find zone by id " + dcId);
    }
    Account account = UserContext.current().getCaller();
    // Only root admins may add clusters to a disabled zone.
    if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) {
        throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + dcId);
    }
    // Check if the pod exists in the system
    if (podId != null) {
        if (_podDao.findById(podId) == null) {
            throw new InvalidParameterValueException("Can't find pod by id " + podId);
        }
        // check if pod belongs to the zone
        HostPodVO pod = _podDao.findById(podId);
        if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) {
            throw new InvalidParameterValueException("Pod " + podId + " doesn't belong to the zone " + dcId);
        }
    }
    // Verify cluster information and create a new cluster if needed
    if (clusterName == null || clusterName.isEmpty()) {
        throw new InvalidParameterValueException("Please specify cluster name");
    }
    if (cmd.getHypervisor() == null || cmd.getHypervisor().isEmpty()) {
        throw new InvalidParameterValueException("Please specify a hypervisor");
    }
    Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(cmd.getHypervisor());
    if (hypervisorType == null) {
        s_logger.error("Unable to resolve " + cmd.getHypervisor() + " to a valid supported hypervisor type");
        throw new InvalidParameterValueException("Unable to resolve " + cmd.getHypervisor() + " to a supported ");
    }
    // Cluster type defaults to CloudManaged when not supplied.
    Cluster.ClusterType clusterType = null;
    if (cmd.getClusterType() != null && !cmd.getClusterType().isEmpty()) {
        clusterType = Cluster.ClusterType.valueOf(cmd.getClusterType());
    }
    if (clusterType == null) {
        clusterType = Cluster.ClusterType.CloudManaged;
    }
    // Allocation state defaults to Enabled when not supplied.
    Grouping.AllocationState allocationState = null;
    if (cmd.getAllocationState() != null && !cmd.getAllocationState().isEmpty()) {
        try {
            allocationState = Grouping.AllocationState.valueOf(cmd.getAllocationState());
        } catch (IllegalArgumentException ex) {
            throw new InvalidParameterValueException("Unable to resolve Allocation State '" + cmd.getAllocationState() + "' to a supported state");
        }
    }
    if (allocationState == null) {
        allocationState = Grouping.AllocationState.Enabled;
    }
    Discoverer discoverer = getMatchingDiscover(hypervisorType);
    if (discoverer == null) {
        throw new InvalidParameterValueException("Could not find corresponding resource manager for " + cmd.getHypervisor());
    }
    List<ClusterVO> result = new ArrayList<ClusterVO>();
    long clusterId = 0;
    ClusterVO cluster = new ClusterVO(dcId, podId, clusterName);
    cluster.setHypervisorType(cmd.getHypervisor());
    cluster.setClusterType(clusterType);
    cluster.setAllocationState(allocationState);
    try {
        cluster = _clusterDao.persist(cluster);
    } catch (Exception e) {
        // no longer tolerate exception during the cluster creation phase
        throw new CloudRuntimeException("Unable to create cluster " + clusterName + " in pod " + podId + " and data center " + dcId, e);
    }
    clusterId = cluster.getId();
    result.add(cluster);
    // Cloud-managed clusters need no external discovery — we are done.
    if (clusterType == Cluster.ClusterType.CloudManaged) {
        return result;
    }
    // save cluster details for later cluster/host cross-checking
    Map<String, String> details = new HashMap<String, String>();
    details.put("url", url);
    details.put("username", username);
    details.put("password", password);
    _clusterDetailsDao.persist(cluster.getId(), details);
    boolean success = false;
    try {
        try {
            uri = new URI(UriUtils.encodeURIComponent(url));
            if (uri.getScheme() == null) {
                throw new InvalidParameterValueException("uri.scheme is null " + url + ", add http:// as a prefix");
            } else if (uri.getScheme().equalsIgnoreCase("http")) {
                if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null || uri.getPath().equalsIgnoreCase("")) {
                    throw new InvalidParameterValueException("Your host and/or path is wrong. Make sure it's of the format http://hostname/path");
                }
            }
        } catch (URISyntaxException e) {
            throw new InvalidParameterValueException(url + " is not a valid uri");
        }
        List<HostVO> hosts = new ArrayList<HostVO>();
        Map<? extends ServerResource, Map<String, String>> resources = null;
        try {
            // Best-effort: a discoverer failure here falls through to the null check below.
            resources = discoverer.find(dcId, podId, clusterId, uri, username, password);
        } catch (Exception e) {
            s_logger.info("Exception in external cluster discovery process with discoverer: " + discoverer.getName());
        }
        if (resources != null) {
            for (Map.Entry<? extends ServerResource, Map<String, String>> entry : resources.entrySet()) {
                ServerResource resource = entry.getKey();
                // For Hyper-V, we are here means agent have already started and connected to management server
                if (hypervisorType == Hypervisor.HypervisorType.Hyperv) {
                    break;
                }
                AgentAttache attache = simulateStart(null, resource, entry.getValue(), true, null, null);
                if (attache != null) {
                    hosts.add(_hostDao.findById(attache.getId()));
                }
                discoverer.postDiscovery(hosts, _nodeId);
            }
            s_logger.info("External cluster has been successfully discovered by " + discoverer.getName());
            success = true;
            return result;
        }
        s_logger.warn("Unable to find the server resources at " + url);
        throw new DiscoveryException("Unable to add the external cluster");
    } catch (Throwable e) {
        s_logger.error("Unexpected exception ", e);
        throw new DiscoveryException("Unable to add the external cluster due to unhandled exception");
    } finally {
        // Undo cluster creation if discovery did not complete successfully.
        if (!success) {
            _clusterDetailsDao.deleteDetails(clusterId);
            _clusterDao.remove(clusterId);
        }
    }
}
/**
 * Returns the first registered discoverer whose hypervisor type matches the
 * requested one, or null when no registered discoverer matches.
 */
private Discoverer getMatchingDiscover(Hypervisor.HypervisorType hypervisorType) {
    for (Enumeration<Discoverer> candidates = _discoverers.enumeration(); candidates.hasMoreElements();) {
        Discoverer candidate = candidates.nextElement();
        if (candidate.getHypervisorType() == hypervisorType) {
            return candidate;
        }
    }
    return null;
}
/**
 * Discovers and adds hosts described by an {@link AddHostCmd}.
 *
 * Resolves zone/cluster context, builds bare-metal parameters when the
 * hypervisor is BareMetal, defaults the allocation state to Enabled, and
 * delegates the actual discovery to discoverHostsFull().
 *
 * @param cmd user-supplied host parameters (zone, pod, cluster, URL, credentials, ...)
 * @return the hosts that were discovered and registered
 * @throws InvalidParameterValueException if the referenced cluster does not exist
 * @throws DiscoveryException if discovery fails
 */
@Override
public List<? extends Host> discoverHosts(AddHostCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
    Long dcId = cmd.getZoneId();
    Long podId = cmd.getPodId();
    Long clusterId = cmd.getClusterId();
    String clusterName = cmd.getClusterName();
    String url = cmd.getUrl();
    String username = cmd.getUsername();
    String password = cmd.getPassword();
    Long memCapacity = cmd.getMemCapacity();
    Long cpuSpeed = cmd.getCpuSpeed();
    Long cpuNum = cmd.getCpuNum();
    String mac = cmd.getMac();
    List<String> hostTags = cmd.getHostTags();
    Map<String, String> bareMetalParams = new HashMap<String, String>();
    dcId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), dcId);
    // this is for standalone option
    if (clusterName == null && clusterId == null) {
        clusterName = "Standalone-" + url;
    }
    if (clusterId != null) {
        ClusterVO cluster = _clusterDao.findById(clusterId);
        if (cluster == null) {
            // fixed message typo: "fine" -> "find"
            throw new InvalidParameterValueException("can not find cluster for clusterId " + clusterId);
        }
        if (cluster.getGuid() == null) {
            // A guid-less cluster that already has hosts is still initializing.
            List<HostVO> hosts = _hostDao.listByCluster(clusterId);
            if (!hosts.isEmpty()) {
                throw new CloudRuntimeException("Guid is not updated for cluster " + clusterId + " need to wait hosts in this cluster up");
            }
        }
    }
    if (cmd.getHypervisor().equalsIgnoreCase(Hypervisor.HypervisorType.BareMetal.toString())) {
        // Bare-metal hosts carry their capacity in explicit parameters; default
        // missing numeric values to 0 and the MAC to "unknown".
        if (memCapacity == null) {
            memCapacity = 0L;
        }
        if (cpuSpeed == null) {
            cpuSpeed = 0L;
        }
        if (cpuNum == null) {
            cpuNum = 0L;
        }
        if (mac == null) {
            mac = "unknown";
        }
        bareMetalParams.put("cpuNum", cpuNum.toString());
        bareMetalParams.put("cpuCapacity", cpuSpeed.toString());
        bareMetalParams.put("memCapacity", memCapacity.toString());
        bareMetalParams.put("mac", mac);
        if (hostTags != null) {
            // Only the first host tag is forwarded to the bare-metal discoverer.
            bareMetalParams.put("hostTag", hostTags.get(0));
        }
    }
    String allocationState = cmd.getAllocationState();
    if (allocationState == null) {
        allocationState = Host.HostAllocationState.Enabled.toString();
    }
    return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, cmd.getHypervisor(), hostTags, bareMetalParams, allocationState);
}
/**
 * Discovers a secondary storage host. Only the zone id and URL from the
 * command are relevant; all other discovery parameters are unused.
 */
@Override
public List<? extends Host> discoverHosts(AddSecondaryStorageCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
    return discoverHosts(cmd.getZoneId(), null, null, null, cmd.getUrl(), null, null, "SecondaryStorage", null);
}
/**
 * Discovers hosts with the given connection parameters. Thin delegation to
 * discoverHostsFull() with no extra discoverer parameters and no explicit
 * allocation state.
 */
@Override
public List<HostVO> discoverHosts(Long dcId, Long podId, Long clusterId, String clusterName, String url, String username, String password, String hypervisorType, List<String> hostTags)
        throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
    return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, hypervisorType, hostTags, null, null);
}
/**
 * Shared host-discovery implementation behind the public discoverHosts overloads.
 *
 * Validates the zone/pod/cluster arguments, creates the cluster row when only a
 * name was given, parses the URL, then tries every registered discoverer that
 * matches the hypervisor type until one returns resources.
 *
 * @param params optional extra parameters pushed into each discoverer (e.g. bare-metal capacity)
 * @param allocationState optional host allocation state passed through to simulateStart
 * @return the discovered hosts; NOTE: may return null on the KVM re-connect path below
 * @throws DiscoveryException if no discoverer supports the hypervisor or none finds resources
 */
private List<HostVO> discoverHostsFull(Long dcId, Long podId, Long clusterId, String clusterName, String url, String username, String password, String hypervisorType, List<String> hostTags,
        Map<String, String> params, String allocationState) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
    URI uri = null;
    // Check if the zone exists in the system
    DataCenterVO zone = _dcDao.findById(dcId);
    if (zone == null) {
        throw new InvalidParameterValueException("Can't find zone by id " + dcId);
    }
    Account account = UserContext.current().getCaller();
    // Only root admins may add hosts to a disabled zone.
    if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) {
        throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + dcId);
    }
    // Check if the pod exists in the system
    if (podId != null) {
        if (_podDao.findById(podId) == null) {
            throw new InvalidParameterValueException("Can't find pod by id " + podId);
        }
        // check if pod belongs to the zone
        HostPodVO pod = _podDao.findById(podId);
        if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) {
            throw new InvalidParameterValueException("Pod " + podId + " doesn't belong to the zone " + dcId);
        }
    }
    // Verify cluster information and create a new cluster if needed
    if (clusterName != null && clusterId != null) {
        throw new InvalidParameterValueException("Can't specify cluster by both id and name");
    }
    if (hypervisorType == null || hypervisorType.isEmpty()) {
        throw new InvalidParameterValueException("Need to specify Hypervisor Type");
    }
    if ((clusterName != null || clusterId != null) && podId == null) {
        throw new InvalidParameterValueException("Can't specify cluster without specifying the pod");
    }
    if (clusterId != null) {
        if (_clusterDao.findById(clusterId) == null) {
            throw new InvalidParameterValueException("Can't find cluster by id " + clusterId);
        }
    }
    if (clusterName != null) {
        ClusterVO cluster = new ClusterVO(dcId, podId, clusterName);
        cluster.setHypervisorType(hypervisorType);
        try {
            cluster = _clusterDao.persist(cluster);
        } catch (Exception e) {
            // Persist can fail if the cluster already exists (e.g. concurrent add) —
            // fall back to looking it up by name before giving up.
            cluster = _clusterDao.findBy(clusterName, podId);
            if (cluster == null) {
                throw new CloudRuntimeException("Unable to create cluster " + clusterName + " in pod " + podId + " and data center " + dcId, e);
            }
        }
        clusterId = cluster.getId();
    }
    try {
        uri = new URI(UriUtils.encodeURIComponent(url));
        if (uri.getScheme() == null) {
            throw new InvalidParameterValueException("uri.scheme is null " + url + ", add nfs:// as a prefix");
        } else if (uri.getScheme().equalsIgnoreCase("nfs")) {
            if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null || uri.getPath().equalsIgnoreCase("")) {
                throw new InvalidParameterValueException("Your host and/or path is wrong. Make sure it's of the format nfs://hostname/path");
            }
        }
    } catch (URISyntaxException e) {
        throw new InvalidParameterValueException(url + " is not a valid uri");
    }
    List<HostVO> hosts = new ArrayList<HostVO>();
    s_logger.info("Trying to add a new host at " + url + " in data center " + dcId);
    Enumeration<Discoverer> en = _discoverers.enumeration();
    boolean isHypervisorTypeSupported = false;
    // Try each registered discoverer that claims to support the hypervisor type.
    while (en.hasMoreElements()) {
        Discoverer discoverer = en.nextElement();
        if (params != null) {
            discoverer.putParam(params);
        }
        if (!discoverer.matchHypervisor(hypervisorType)) {
            continue;
        }
        isHypervisorTypeSupported = true;
        Map<? extends ServerResource, Map<String, String>> resources = null;
        try {
            resources = discoverer.find(dcId, podId, clusterId, uri, username, password);
        } catch (DiscoveredWithErrorException e) {
            // A discoverer that found the host but failed on it must abort the add.
            throw e;
        } catch (Exception e) {
            s_logger.info("Exception in host discovery process with discoverer: " + discoverer.getName() + ", skip to another discoverer if there is any");
        }
        if (resources != null) {
            for (Map.Entry<? extends ServerResource, Map<String, String>> entry : resources.entrySet()) {
                ServerResource resource = entry.getKey();
                /*
                 * For KVM, if we go to here, that means kvm agent is already connected to mgt svr.
                 */
                if (resource instanceof KvmDummyResourceBase) {
                    Map<String, String> details = entry.getValue();
                    String guid = details.get("guid");
                    List<HostVO> kvmHosts = _hostDao.listBy(Host.Type.Routing, clusterId, podId, dcId);
                    for (HostVO host : kvmHosts) {
                        if (host.getGuid().equalsIgnoreCase(guid)) {
                            hosts.add(host);
                            return hosts;
                        }
                    }
                    // NOTE(review): returns null (not an empty list) when the KVM guid is
                    // unknown — callers must tolerate a null result.
                    return null;
                }
                AgentAttache attache = simulateStart(null, resource, entry.getValue(), true, hostTags, allocationState);
                if (attache != null) {
                    hosts.add(_hostDao.findById(attache.getId()));
                }
                discoverer.postDiscovery(hosts, _nodeId);
            }
            s_logger.info("server resources successfully discovered by " + discoverer.getName());
            return hosts;
        }
    }
    if (!isHypervisorTypeSupported) {
        String msg = "Do not support HypervisorType " + hypervisorType + " for " + url;
        s_logger.warn(msg);
        throw new DiscoveryException(msg);
    }
    s_logger.warn("Unable to find the server resources at " + url);
    throw new DiscoveryException("Unable to add the host");
}
/**
 * Deletes a cluster inside a DB transaction, but only if it has no hosts.
 *
 * @param cmd carries the id of the cluster to delete
 * @return true if the cluster was removed (or did not exist); false if it
 *         still contains hosts or the delete failed
 */
@Override
@DB
public boolean deleteCluster(DeleteClusterCmd cmd) {
    Transaction txn = Transaction.currentTxn();
    try {
        txn.start();
        // Lock the row so a concurrent host add cannot race the delete.
        ClusterVO cluster = _clusterDao.lockRow(cmd.getId(), true);
        if (cluster == null) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Cluster: " + cmd.getId() + " does not even exist. Delete call is ignored.");
            }
            txn.rollback();
            // A missing cluster is treated as already deleted.
            return true;
        }
        List<HostVO> hosts = _hostDao.listByCluster(cmd.getId());
        if (!hosts.isEmpty()) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Cluster: " + cmd.getId() + " still has hosts");
            }
            txn.rollback();
            return false;
        }
        _clusterDao.remove(cmd.getId());
        txn.commit();
        return true;
    } catch (Throwable t) {
        // Broad catch at this boundary: any failure must roll back and report false.
        s_logger.error("Unable to delete cluster: " + cmd.getId(), t);
        txn.rollback();
        return false;
    }
}
/**
 * Applies the requested hypervisor / cluster-type / allocation-state changes
 * to a cluster and persists them in a single transaction when anything changed.
 *
 * Empty or null parameters leave the corresponding attribute untouched.
 *
 * @return the (possibly updated) cluster
 * @throws InvalidParameterValueException if a value cannot be resolved to a supported constant
 */
@Override
@DB
public Cluster updateCluster(Cluster clusterToUpdate, String clusterType, String hypervisor, String allocationState) {
    ClusterVO cluster = (ClusterVO) clusterToUpdate;
    // Verify cluster information and update the cluster if needed
    boolean doUpdate = false;
    if (hypervisor != null && !hypervisor.isEmpty()) {
        Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(hypervisor);
        if (hypervisorType == null) {
            s_logger.error("Unable to resolve " + hypervisor + " to a valid supported hypervisor type");
            throw new InvalidParameterValueException("Unable to resolve " + hypervisor + " to a supported type");
        }
        cluster.setHypervisorType(hypervisor);
        doUpdate = true;
    }
    if (clusterType != null && !clusterType.isEmpty()) {
        // Enum.valueOf either returns a non-null constant or throws
        // IllegalArgumentException, so the old post-hoc null check was
        // unreachable and has been removed (same below for allocation state).
        try {
            cluster.setClusterType(Cluster.ClusterType.valueOf(clusterType));
        } catch (IllegalArgumentException ex) {
            throw new InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type");
        }
        doUpdate = true;
    }
    if (allocationState != null && !allocationState.isEmpty()) {
        try {
            cluster.setAllocationState(Grouping.AllocationState.valueOf(allocationState));
        } catch (IllegalArgumentException ex) {
            throw new InvalidParameterValueException("Unable to resolve Allocation State '" + allocationState + "' to a supported state");
        }
        doUpdate = true;
    }
    if (doUpdate) {
        Transaction txn = Transaction.currentTxn();
        try {
            txn.start();
            _clusterDao.update(cluster.getId(), cluster);
            txn.commit();
        } catch (Exception e) {
            s_logger.error("Unable to update cluster due to " + e.getMessage(), e);
            throw new CloudRuntimeException("Failed to update cluster. Please contact Cloud Support.");
        }
    }
    return cluster;
}
/** Looks up a cluster by its id via the cluster DAO. */
@Override
public Cluster getCluster(Long clusterId) {
    return _clusterDao.findById(clusterId);
}
@Override
public Answer sendToSecStorage(HostVO ssHost, Command cmd) {
if( ssHost.getType() == Host.Type.LocalSecondaryStorage ) {
@ -1022,8 +483,8 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
return new Answer(cmd, false, msg);
}
}
@Override
public HostVO getSSAgent(HostVO ssHost) {
if( ssHost.getType() == Host.Type.LocalSecondaryStorage ) {
@ -1038,9 +499,9 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
Random rn = new Random(System.currentTimeMillis());
return ssAHosts.get(rn.nextInt(size));
}
return null;
return null;
}
@Override
public long sendToSecStorage(HostVO ssHost, Command cmd, Listener listener) {
if( ssHost.getType() == Host.Type.LocalSecondaryStorage ) {
@ -1068,11 +529,11 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
return -1;
}
}
private Answer sendToSSVM(final long dcId, final Command cmd) {
List<HostVO> ssAHosts = _hostDao.listByTypeDataCenter(Host.Type.SecondaryStorageVM, dcId);
if (ssAHosts == null || ssAHosts.isEmpty() ) {
return new Answer(cmd, false, "can not find secondary storage VM agent for data center " + dcId);
return new Answer(cmd, false, "can not find secondary storage VM agent for data center " + dcId);
}
int size = ssAHosts.size();
Random rn = new Random(System.currentTimeMillis());
@ -1117,10 +578,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
return true;
}
if (host.getType() == Type.SecondaryStorage) {
return deleteSecondaryStorageHost(host);
}
AgentAttache attache = findAttache(hostId);
// Get storage pool host mappings here because they can be removed as a part of handleDisconnect later
List<StoragePoolHostVO> pools = _storagePoolHostDao.listByHostId(hostId);
@ -1280,76 +737,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
return true;
}
/**
 * Deletes a secondary storage host: puts it through maintenance, disconnects
 * its agent, removes the host row, and purges its template and capacity records.
 *
 * Refuses to delete while any VM instances exist in the host's zone.
 *
 * @param secStorageHost the secondary storage host to remove
 * @return true on success; false if preconditions fail or any step errors out
 */
@DB
protected boolean deleteSecondaryStorageHost(HostVO secStorageHost) {
    long zoneId = secStorageHost.getDataCenterId();
    long hostId = secStorageHost.getId();
    Transaction txn = Transaction.currentTxn();
    try {
        List<VMInstanceVO> allVmsInZone = _vmDao.listByZoneId(zoneId);
        if (!allVmsInZone.isEmpty()) {
            s_logger.warn("Cannot delete secondary storage host when there are " + allVmsInZone.size() + " vms in zone " + zoneId);
            return false;
        }
        txn.start();
        // NOTE(review): unlike deleteCluster(), the failure paths below return after
        // txn.start() without calling txn.rollback() — confirm the Transaction helper
        // cleans up open transactions on its own.
        if (!_hostDao.updateStatus(secStorageHost, Event.MaintenanceRequested, _nodeId)) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Unable to take host " + hostId + " into maintenance mode. Delete call is ignored");
            }
            return false;
        }
        if (!_hostDao.updateStatus(secStorageHost, Event.PreparationComplete, _nodeId)) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Unable to take host " + hostId + " into maintenance mode. Delete call is ignored");
            }
            return false;
        }
        AgentAttache attache = findAttache(hostId);
        if (attache != null) {
            handleDisconnect(attache, Status.Event.Remove, false);
        }
        // now delete the host
        secStorageHost.setGuid(null);
        _hostDao.update(secStorageHost.getId(), secStorageHost);
        _hostDao.remove(secStorageHost.getId());
        // delete the templates associated with this host
        SearchCriteria<VMTemplateHostVO> templateHostSC = _vmTemplateHostDao.createSearchCriteria();
        templateHostSC.addAnd("hostId", SearchCriteria.Op.EQ, secStorageHost.getId());
        _vmTemplateHostDao.remove(templateHostSC);
        // delete the op_host_capacity entry
        SearchCriteria<CapacityVO> secStorageCapacitySC = _capacityDao.createSearchCriteria();
        secStorageCapacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, secStorageHost.getId());
        secStorageCapacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_SECONDARY_STORAGE);
        _capacityDao.remove(secStorageCapacitySC);
        /* Disconnected agent needs special handling here */
        // NOTE(review): guid was already cleared above; this second setGuid(null)
        // appears redundant.
        secStorageHost.setGuid(null);
        txn.commit();
        return true;
    } catch (Throwable t) {
        s_logger.error("Unable to delete sec storage host: " + secStorageHost.getId(), t);
        return false;
    }
}
@Override
public boolean isVirtualMachineUpgradable(final UserVm vm, final ServiceOffering offering) {
    // Poll every registered host allocator; the upgrade is allowed only if
    // none of them vetoes it, and we stop at the first veto.
    final Enumeration<HostAllocator> allocators = _hostAllocators.enumeration();
    while (allocators.hasMoreElements()) {
        if (!allocators.nextElement().isVirtualMachineUpgradable(vm, offering)) {
            return false;
        }
    }
    return true;
}
/**
 * @return the configured agent ping interval (units are as configured —
 *         presumably seconds; confirm against the ping.interval setting).
 */
protected int getPingInterval() {
    return _pingInterval;
}
@ -1467,9 +854,9 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
return seq;
}
@Override
public long gatherStats(final Long hostId, final Command cmd, final Listener listener) {
try {
@ -2324,7 +1711,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
resource = null;
}
}else if (ssCmd.getResourceType() == Storage.StorageResourceType.LOCAL_SECONDARY_STORAGE) {
type = Host.Type.LocalSecondaryStorage;
type = Host.Type.LocalSecondaryStorage;
} else {
type = Host.Type.Storage;
@ -2850,7 +2237,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
answer = new StartupAnswer(startup, attache.getId(), getPingInterval());
} else if (cmd instanceof StartupSecondaryStorageCommand) {
final StartupSecondaryStorageCommand startup = (StartupSecondaryStorageCommand) cmd;
answer = new StartupAnswer(startup, attache.getId(), getPingInterval());
answer = new StartupAnswer(startup, attache.getId(), getPingInterval());
} else if (cmd instanceof StartupStorageCommand) {
final StartupStorageCommand startup = (StartupStorageCommand) cmd;
answer = new StartupAnswer(startup, attache.getId(), getPingInterval());

View File

@ -0,0 +1,48 @@
/**
* Copyright (C) 2010 Cloud.com, Inc. All rights reserved.
*
* This software is licensed under the GNU General Public License v3 or later.
*
* It is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.cloud.resource;
import com.cloud.agent.api.StartupCommand;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
/**
* Listener registered with the ResourceManager if you want to be informed
* of a certain type of host's life cycles.
*
*/
public interface ResourceLifeCycleListener {
    /**
     * @return the type of resource this listener can process.
     */
    Host.Type getType();

    /**
     * Notifies the listener that a host of its type was added.
     *
     * @param host the host that was added.
     * @param cmd the startup command the host registered with.
     * @param created presumably true when the host record was newly created
     *                rather than reconnected — confirm with implementations.
     */
    void add(HostVO host, StartupCommand cmd, boolean created);

    /**
     * Put the resource into maintenance mode.
     *
     * @param host the host being put into maintenance.
     * @param force presumably skip graceful preparation when true — confirm
     *              with implementations.
     */
    void maintain(HostVO host, boolean force);

    /**
     * Notifies the listener that a host of its type was removed.
     *
     * @param host the host that was removed.
     * @param force presumably proceed even if cleanup fails when true —
     *              confirm with implementations.
     */
    void removed(HostVO host, boolean force);

    /** Notifies the listener that the host was enabled. */
    void enable(HostVO host);

    /** Notifies the listener that the host was disabled. */
    void disable(HostVO host);
}

View File

@ -17,6 +17,19 @@
*/
package com.cloud.resource;
public interface ResourceManager {
import com.cloud.host.Host;
/**
* ResourceManager manages how physical resources are organized within the
* CloudStack. It also manages the life cycle of the physical resources.
*/
public interface ResourceManager {
/**
* Register a listener for different types of resource life cycle events.
* There can only be one type of listener per type of host.
*
* @param type the resource type the listener is responsible to.
* @param listener the listener to notify.
*/
void registerForLifeCycleEvents(Host.Type type, ResourceLifeCycleListener listener);
}

View File

@ -68,6 +68,7 @@ import com.cloud.org.Grouping;
import com.cloud.storage.GuestOSCategoryVO;
import com.cloud.storage.StorageManager;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.secondary.SecondaryStorageVmManager;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.User;
@ -92,6 +93,8 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma
AgentManager _agentMgr;
@Inject
StorageManager _storageMgr;
@Inject
protected SecondaryStorageVmManager _secondaryStorageMgr;
@Inject
protected DataCenterDao _dcDao;
@ -113,6 +116,13 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma
protected long _nodeId = ManagementServerNode.getManagementServerId();
protected HashMap<Host.Type, ResourceLifeCycleListener> _listeners = new HashMap<Host.Type, ResourceLifeCycleListener>();
@Override
public void registerForLifeCycleEvents(Host.Type type, ResourceLifeCycleListener listener) {
_listeners.put(type, listener);
}
@Override
public boolean updateHostPassword(UpdateHostPasswordCmd cmd) {
return _agentMgr.updateHostPassword(cmd);
@ -519,7 +529,11 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma
throw new InvalidParameterValueException("Host with id " + hostId + " doesn't exist");
}
_accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), host.getDataCenterId());
return _agentMgr.deleteHost(hostId, isForced, caller);
if (Host.Type.SecondaryStorage.equals(host.getType())) {
return _secondaryStorageMgr.destroySecStorageVm(hostId);
} else {
return _agentMgr.deleteHost(hostId, isForced, caller);
}
}
@Override

View File

@ -1657,7 +1657,7 @@ public class ManagementServerImpl implements ManagementServer {
// Show only those that are downloaded.
boolean onlyReady = (templateFilter == TemplateFilter.featured) || (templateFilter == TemplateFilter.selfexecutable) || (templateFilter == TemplateFilter.sharedexecutable)
|| (templateFilter == TemplateFilter.executable && isAccountSpecific) || (templateFilter == TemplateFilter.community);
|| (templateFilter == TemplateFilter.executable && isAccountSpecific) || (templateFilter == TemplateFilter.community);
Account account = null;
DomainVO domain = null;
@ -4225,7 +4225,7 @@ public class ManagementServerImpl implements ManagementServer {
VMTemplateVO template = ApiDBUtils.findTemplateById(volume.getTemplateId());
boolean isExtractable = template != null && template.isExtractable() && template.getTemplateType() != Storage.TemplateType.SYSTEM;
if (!isExtractable && account != null && account.getType() != Account.ACCOUNT_TYPE_ADMIN) { // Global admins are allowed
// to extract
// to extract
throw new PermissionDeniedException("The volume:" + volumeId + " is not allowed to be extracted");
}

View File

@ -41,7 +41,9 @@ import com.cloud.agent.api.StopAnswer;
import com.cloud.agent.api.check.CheckSshAnswer;
import com.cloud.agent.api.check.CheckSshCommand;
import com.cloud.agent.manager.Commands;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.cluster.ClusterManager;
import com.cloud.cluster.ManagementServerNode;
import com.cloud.configuration.Config;
import com.cloud.configuration.ZoneConfig;
import com.cloud.configuration.dao.ConfigurationDao;
@ -88,7 +90,6 @@ import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.events.SubscriptionMgr;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.net.NfsUtils;
import com.cloud.vm.Nic;
import com.cloud.vm.NicProfile;
import com.cloud.vm.ReservationContext;
@ -104,6 +105,7 @@ import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.VirtualMachineName;
import com.cloud.vm.VirtualMachineProfile;
import com.cloud.vm.dao.SecondaryStorageVmDao;
import com.cloud.vm.dao.VMInstanceDao;
//
// Possible secondary storage vm state transition cases
@ -128,9 +130,9 @@ public class SecondaryStorageManagerImpl implements SecondaryStorageVmManager, V
private static final Logger s_logger = Logger.getLogger(SecondaryStorageManagerImpl.class);
private static final int DEFAULT_CAPACITY_SCAN_INTERVAL = 30000; // 30
// seconds
// seconds
private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC = 180; // 3
// minutes
// minutes
private static final int STARTUP_DELAY = 60000; // 60 seconds
@ -175,6 +177,10 @@ public class SecondaryStorageManagerImpl implements SecondaryStorageVmManager, V
private AccountService _accountMgr;
@Inject
private VirtualMachineManager _itMgr;
@Inject
protected VMInstanceDao _vmDao;
@Inject
protected CapacityDao _capacityDao;
private long _capacityScanInterval = DEFAULT_CAPACITY_SCAN_INTERVAL;
@ -185,6 +191,7 @@ public class SecondaryStorageManagerImpl implements SecondaryStorageVmManager, V
private boolean _useLocalStorage;
private boolean _useSSlCopy;
private String _allowedInternalSites;
protected long _nodeId = ManagementServerNode.getManagementServerId();
private SystemVmLoadScanner<Long> _loadScanner;
private Map<Long, ZoneHostInfo> _zoneHostInfoMap; // map <zone id, info about running host in zone>

View File

@ -95,7 +95,6 @@ import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventVO;
import com.cloud.event.dao.EventDao;
import com.cloud.event.dao.UsageEventDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
@ -109,8 +108,8 @@ import com.cloud.exception.VirtualMachineMigrationException;
import com.cloud.ha.HighAvailabilityManager;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.network.IPAddressVO;
import com.cloud.network.Network;
@ -937,7 +936,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
}
// Check that there are enough resources to upgrade the service offering
if (!_agentMgr.isVirtualMachineUpgradable(vmInstance, newServiceOffering)) {
if (!_itMgr.isVirtualMachineUpgradable(vmInstance, newServiceOffering)) {
throw new InvalidParameterValueException("Unable to upgrade virtual machine, not enough resources available for an offering of " + newServiceOffering.getCpu() + " cpu(s) at "
+ newServiceOffering.getSpeed() + " Mhz, and " + newServiceOffering.getRamSize() + " MB of memory");
}
@ -1341,11 +1340,11 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
if (snapshot == null) {
throw new InvalidParameterValueException("Failed to create private template record, unable to find snapshot " + snapshotId);
}
if (snapshot.getStatus() != Snapshot.Status.BackedUp) {
throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.Status.BackedUp + " state yet and can't be used for template creation");
}
domainId = snapshot.getDomainId();
accountId = snapshot.getAccountId();
hyperType = snapshot.getHypervisorType();
@ -1385,11 +1384,11 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
Long nextTemplateId = _templateDao.getNextInSequence(Long.class, "id");
String description = cmd.getDisplayText();
boolean isExtractable = false;
Long sourceTemplateId = null;
Long sourceTemplateId = null;
if (volume != null) {
VMTemplateVO template = ApiDBUtils.findTemplateById(volume.getTemplateId());
isExtractable = template != null && template.isExtractable() && template.getTemplateType() != Storage.TemplateType.SYSTEM;
sourceTemplateId = template.getId();
sourceTemplateId = template.getId();
}
privateTemplate = new VMTemplateVO(nextTemplateId, uniqueName, name, ImageFormat.RAW, isPublic, featured, isExtractable, TemplateType.USER, null, null, requiresHvmValue, bitsValue, accountId,
null, description, passwordEnabledValue, guestOS.getId(), true, hyperType);
@ -1584,7 +1583,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
return privateTemplate;
}
@Override
public String getChecksum(Long hostId, String templatePath){
HostVO ssHost = _hostDao.findById(hostId);
@ -1600,7 +1599,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
}
return null;
}
// used for vm transitioning to error state
private void updateVmStateForFailedVmCreation(Long vmId) {
UserVmVO vm = _vmDao.findById(vmId);
@ -2259,12 +2258,12 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
if (isIso && !template.isBootable()) {
throw new InvalidParameterValueException("Installing from ISO requires an ISO that is bootable: " + template.getId());
}
// Check templates permissions
if (!template.isPublicTemplate()) {
Account templateOwner = _accountMgr.getAccount(template.getAccountId());
_accountMgr.checkAccess(owner, templateOwner);
}
}
// If the template represents an ISO, a disk offering must be passed in, and will be used to create the root disk
// Else, a disk offering is optional, and if present will be used to create the data disk
@ -2619,7 +2618,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
if (ipChanged) {
DataCenterVO dc = _dcDao.findById(vm.getDataCenterId());
UserVmVO userVm = profile.getVirtualMachine();
if (dc.getDhcpProvider().equalsIgnoreCase(Provider.ExternalDhcpServer.getName())){
if (dc.getDhcpProvider().equalsIgnoreCase(Provider.ExternalDhcpServer.getName())){
_nicDao.update(guestNic.getId(), guestNic);
userVm.setPrivateIpAddress(guestNic.getIp4Address());
_vmDao.update(userVm.getId(), userVm);

View File

@ -32,11 +32,13 @@ import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.VirtualMachineMigrationException;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.network.NetworkVO;
import com.cloud.offering.ServiceOffering;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.user.Account;
import com.cloud.user.User;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.component.Manager;
@ -76,7 +78,7 @@ public interface VirtualMachineManager extends Manager {
<T extends VMInstanceVO> T start(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account) throws InsufficientCapacityException, ResourceUnavailableException;
<T extends VMInstanceVO> T start(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account, DeploymentPlan planToDeploy) throws InsufficientCapacityException, ResourceUnavailableException;
<T extends VMInstanceVO> T start(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account, DeploymentPlan planToDeploy) throws InsufficientCapacityException, ResourceUnavailableException;
<T extends VMInstanceVO> boolean stop(T vm, User caller, Account account) throws ResourceUnavailableException;
@ -100,11 +102,21 @@ public interface VirtualMachineManager extends Manager {
boolean migrateAway(VirtualMachine.Type type, long vmid, long hostId) throws InsufficientServerCapacityException, VirtualMachineMigrationException;
<T extends VMInstanceVO> T migrate(T vm, long srcHostId, DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException;
<T extends VMInstanceVO> T migrate(T vm, long srcHostId, DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException;
<T extends VMInstanceVO> T reboot(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account) throws InsufficientCapacityException, ResourceUnavailableException;
<T extends VMInstanceVO> T advanceReboot(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, OperationTimedoutException;
VMInstanceVO findById(VirtualMachine.Type type, long vmId);
/**
* Check to see if a virtual machine can be upgraded to the given service offering
*
* @param vm
* @param offering
* @return true if the host can handle the upgrade, false otherwise
*/
boolean isVirtualMachineUpgradable(final UserVm vm, final ServiceOffering offering);
}

View File

@ -20,6 +20,7 @@ package com.cloud.vm;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -57,6 +58,7 @@ import com.cloud.agent.api.StopAnswer;
import com.cloud.agent.api.StopCommand;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.manager.Commands;
import com.cloud.agent.manager.allocator.HostAllocator;
import com.cloud.alert.AlertManager;
import com.cloud.capacity.CapacityManager;
import com.cloud.cluster.ClusterManager;
@ -99,6 +101,7 @@ import com.cloud.hypervisor.HypervisorGuruManager;
import com.cloud.network.Network;
import com.cloud.network.NetworkManager;
import com.cloud.network.NetworkVO;
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
@ -120,6 +123,7 @@ import com.cloud.user.AccountManager;
import com.cloud.user.User;
import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.UserDao;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Journal;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
@ -215,6 +219,9 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
@Inject(adapter = DeploymentPlanner.class)
protected Adapters<DeploymentPlanner> _planners;
@Inject(adapter = HostAllocator.class)
protected Adapters<HostAllocator> _hostAllocators;
Map<VirtualMachine.Type, VirtualMachineGuru<? extends VMInstanceVO>> _vmGurus = new HashMap<VirtualMachine.Type, VirtualMachineGuru<? extends VMInstanceVO>>();
protected StateMachine2<State, VirtualMachine.Event, VirtualMachine> _stateMachine;
@ -433,7 +440,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
@Override
public <T extends VMInstanceVO> T start(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account) throws InsufficientCapacityException, ResourceUnavailableException {
return start(vm, params, caller, account, null);
}
}
@Override
public <T extends VMInstanceVO> T start(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account, DeploymentPlan planToDeploy) throws InsufficientCapacityException, ResourceUnavailableException {
try {
@ -597,7 +604,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
for (VolumeVO vol : vols) {
// make sure if the templateId is unchanged. If it is changed, let planner
// reassign pool for the volume even if it ready.
// reassign pool for the volume even if it ready.
Long volTemplateId = vol.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != template.getId()) {
if (s_logger.isDebugEnabled()) {
@ -631,7 +638,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
}
}
}
}
}
@ -712,7 +719,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
throw new ExecutionException("Unable to stop " + vm + " so we are unable to retry the start operation");
}
}
}
}
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
} catch (OperationTimedoutException e) {
s_logger.debug("Unable to send the start command to host " + dest.getHost());
@ -742,7 +749,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
}
} catch (Exception e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance", destHostId, e);
throw new AgentUnavailableException("Unable to start instance", destHostId, e);
} finally {
if (startedVm == null && canRetry) {
_workDao.updateStep(work, Step.Release);
@ -752,13 +759,13 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
}
} finally {
if (startedVm == null) {
// decrement only for user VM's and newly created VM
if (vm.getType().equals(VirtualMachine.Type.User) && (vm.getLastHostId() == null)) {
// decrement only for user VM's and newly created VM
if (vm.getType().equals(VirtualMachine.Type.User) && (vm.getLastHostId() == null)) {
_accountMgr.decrementResourceCount(vm.getAccountId(), ResourceType.user_vm);
}
if (canRetry) {
changeState(vm, Event.OperationFailed, null, work, Step.Done);
}
if (canRetry) {
changeState(vm, Event.OperationFailed, null, work, Step.Done);
}
}
}
@ -1285,6 +1292,18 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
}
}
@Override
public boolean isVirtualMachineUpgradable(UserVm vm, ServiceOffering offering) {
    // Every registered host allocator must agree that the new offering fits;
    // bail out as soon as one of them rejects the upgrade.
    for (Enumeration<HostAllocator> e = _hostAllocators.enumeration(); e.hasMoreElements();) {
        if (!e.nextElement().isVirtualMachineUpgradable(vm, offering)) {
            return false;
        }
    }
    return true;
}
@Override
public <T extends VMInstanceVO> T reboot(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account) throws InsufficientCapacityException, ResourceUnavailableException {
try {

View File

@ -21,7 +21,8 @@ import org.junit.runner.RunWith;
import org.junit.runners.Suite;
/**
 * JUnit suite bundling the 2.1.x/2.2.x database upgrade tests.
 */
// Fix: the text carried two @Suite.SuiteClasses annotations (a non-repeatable
// annotation, so invalid Java); kept the newer list, which adds
// AdvanceZone223To224UpgradeTest.
@RunWith(Suite.class)
@Suite.SuiteClasses({ AdvanceZone217To224UpgradeTest.class, AdvanceZone223To224UpgradeTest.class, PortForwarding218To224UpgradeTest.class, InstanceGroup218To224UpgradeTest.class,
        BasicZone218To224UpgradeTest.class, UsageEvents218To224UpgradeTest.class })
public class DbUpgrade22Test {
}

View File

@ -1,116 +0,0 @@
/**
* Copyright (C) 2010 Cloud.com, Inc. All rights reserved.
*
* This software is licensed under the GNU General Public License v3 or later.
*
* It is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.cloud.utils;
import java.io.Serializable;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
/**
* Constant is different from Enum in that it allows anyone who declares a
* class that extends from it to allow for more Constants to be added. It does
* not all need to be declared in the same place like Enums are. The class
* is really geared toward String as the underneath value.
*
* However, the flexibility of Constant can lead to bad programming practices
* since you can dynamically add Constant values at any time. The correct
* thing to do is declare Constant like you would declare other constants,
* as static variables. Unfortunately, I have no way to enforce that so I
* leave it up to the developer to behave. .... So behave!
*/
public abstract class Constant {
    // Global registry: concrete Constant subclass -> (value's string form -> instance).
    static Map<Class<?>, Map<String, Constant>> _values = new HashMap<Class<?>, Map<String, Constant>>();

    // The wrapped value; its toString() form is the registry key.
    Serializable _value;

    /**
     * Declares a new constant and registers it under its concrete subclass.
     *
     * @param value the underlying value of the constant; must not be null.
     */
    public Constant(Serializable value) {
        // Bug fix: register under the declaring Constant subclass
        // (this.getClass()), not under value.getClass(). Every lookup method
        // (constants, values, constant, parseConstant) keys by the Constant
        // subclass, so the old keying made lookups miss (NPE on the inner
        // map) and collided constants of unrelated subclasses that happened
        // to share a value type such as String.
        Class<?> clazz = this.getClass();
        synchronized (Constant.class) {
            Map<String, Constant> map = alreadyContains(clazz, value);
            if (map == null) {
                map = new HashMap<String, Constant>();
                _values.put(clazz, map);
            }
            _value = value;
            map.put(value.toString(), this);
        }
    }

    /**
     * @return the existing registry map for clazz, or null if none exists yet;
     *         asserts the value has not already been declared for that class.
     */
    final private Map<String, Constant> alreadyContains(Class<?> clazz, Object value) {
        Map<String, Constant> map = _values.get(clazz);
        if (map == null) {
            return null;
        }
        assert !map.containsKey(value.toString()) : "Unfortunately you can not declare this constant as it has been declared by someone else";
        return map;
    }

    /** @return the underlying value of this constant. */
    public final Serializable value() {
        return _value;
    }

    @Override
    public boolean equals(Object obj) {
        // Note: equality is by value only, so constants of different
        // subclasses wrapping equal values compare equal.
        if (!(obj instanceof Constant)) {
            return false;
        }
        Constant that = (Constant) obj;
        return _value.equals(that._value);
    }

    @Override
    public int hashCode() {
        return _value.hashCode();
    }

    @Override
    public String toString() {
        return _value.toString();
    }

    /**
     * @return all constants declared for the given subclass.
     */
    @SuppressWarnings("unchecked")
    public static <T extends Constant> Collection<T> constants(Class<T> clazz) {
        // Note that this method is not synchronized for a reason.  We
        // expect the developer to behave and use Constant as it is intended:
        // declared as static fields, i.e. before any lookups happen.
        return (Collection<T>) _values.get(clazz).values();
    }

    /**
     * @return all constants declared for the given subclass.
     */
    public static Collection<? extends Constant> values(Class<?> clazz) {
        // Not synchronized on purpose; see constants(Class).
        return _values.get(clazz).values();
    }

    /** @return all constants declared for this constant's subclass. */
    public Collection<? extends Constant> values() {
        return values(this.getClass());
    }

    /**
     * @return the constant of the given subclass whose value's string form
     *         matches, or null if not declared.
     */
    public static Constant constant(Class<?> clazz, Serializable value) {
        // Not synchronized on purpose; see constants(Class).
        return _values.get(clazz).get(value.toString());
    }

    /**
     * @return the constant of the given subclass declared with the given
     *         string form, or null if not declared.
     */
    public static Constant parseConstant(Class<?> clazz, String value) {
        // Not synchronized on purpose; see constants(Class).
        return _values.get(clazz).get(value);
    }
}