Allow some operations on the lock table to be performed through JMX

This commit is contained in:
Alex Huang 2011-07-06 16:08:55 -07:00
parent f1bee86263
commit ee2670edc7
4 changed files with 107 additions and 50 deletions

View File

@ -23,6 +23,7 @@ public class DataCenterDeployment implements DeploymentPlan {
Long _clusterId;
Long _poolId;
Long _hostId;
boolean _recreateDisks;
public DataCenterDeployment(long dataCenterId) {
this(dataCenterId, null, null, null, null);

View File

@ -47,8 +47,8 @@ import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
@ -85,7 +85,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
@Inject protected HostPodDao _podDao;
@Inject protected ClusterDao _clusterDao;
@Inject protected HostDetailsDao _hostDetailsDao = null;
@Inject protected GuestOSDao _guestOSDao = null;
@Inject protected GuestOSDao _guestOSDao = null;
@Inject protected GuestOSCategoryDao _guestOSCategoryDao = null;
@Inject protected DiskOfferingDao _diskOfferingDao;
@Inject protected StoragePoolHostDao _poolHostDao;
@ -119,24 +119,28 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
s_logger.debug("In FirstFitPlanner:: plan");
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() +
", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No"));
if (s_logger.isDebugEnabled()) {
s_logger.debug("In FirstFitPlanner:: plan");
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() +
", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No"));
}
if(plan.getHostId() != null){
Long hostIdSpecified = plan.getHostId();
if(s_logger.isDebugEnabled()){
if (s_logger.isDebugEnabled()){
s_logger.debug("DeploymentPlan has host_id specified, making no checks on this host, looks like admin test: "+hostIdSpecified);
}
HostVO host = _hostDao.findById(hostIdSpecified);
if(host == null){
s_logger.debug("The specified host cannot be found");
}else{
s_logger.debug("Looking for suitable pools for this host under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
}
if (s_logger.isDebugEnabled()) {
if(host == null){
s_logger.debug("The specified host cannot be found");
}else{
s_logger.debug("Looking for suitable pools for this host under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
}
}
//search for storage under the zone, pod, cluster of the host.
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId());
@ -155,21 +159,21 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
Pod pod = _podDao.findById(host.getPodId());
Cluster cluster = _clusterDao.findById(host.getClusterId());
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
// remove the reused vol<->pool from destination, since we don't have to prepare this volume.
// remove the reused vol<->pool from destination, since we don't have to prepare this volume.
for(Volume vol : readyAndReusedVolumes){
storageVolMap.remove(vol);
}
}
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
s_logger.debug("Returning Deployment Destination: "+ dest);
return dest;
}
}
}
s_logger.debug("Cannnot deploy to specified host, returning.");
return null;
}
if (vm.getLastHostId() != null) {
s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId());
s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId());
HostVO host = _hostDao.findById(vm.getLastHostId());
if(host == null){
@ -179,13 +183,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
//check zone/pod/cluster are enabled
if(isEnabledForAllocation(host.getDataCenterId(), host.getPodId(), host.getClusterId())){
if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOverprovisioningFactor)){
s_logger.debug("The last host of this VM is UP and has enough capacity");
s_logger.debug("The last host of this VM is UP and has enough capacity");
s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
//search for storage under the zone, pod, cluster of the last host.
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId());
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId());
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoid, RETURN_UPTO_ALL);
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
List<Volume> readyAndReusedVolumes = result.second();
List<Volume> readyAndReusedVolumes = result.second();
//choose the potential pool for this VM for this host
if(!suitableVolumeStoragePools.isEmpty()){
List<Host> suitableHosts = new ArrayList<Host>();
@ -199,7 +203,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
// remove the reused vol<->pool from destination, since we don't have to prepare this volume.
for(Volume vol : readyAndReusedVolumes){
storageVolMap.remove(vol);
}
}
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
s_logger.debug("Returning Deployment Destination: "+ dest);
return dest;
@ -220,14 +224,14 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
if(!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())){
s_logger.debug("Cannot deploy to specified plan, allocation state is disabled, returning.");
return null;
}
}
List<Long> clusterList = new ArrayList<Long>();
if (plan.getClusterId() != null) {
Long clusterIdSpecified = plan.getClusterId();
s_logger.debug("Searching resources only under specified Cluster: "+ clusterIdSpecified);
ClusterVO cluster = _clusterDao.findById(plan.getClusterId());
if (cluster != null ){
if (cluster != null ){
clusterList.add(clusterIdSpecified);
return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc, _allocationAlgorithm);
}else{
@ -242,7 +246,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
HostPodVO pod = _podDao.findById(podIdSpecified);
if (pod != null) {
//list clusters under this pod by cpu and ram capacity
//list clusters under this pod by cpu and ram capacity
clusterList = listClustersByCapacity(podIdSpecified, cpu_requested, ram_requested, avoid, false, cpuOverprovisioningFactor);
if(!clusterList.isEmpty()){
if(avoid.getClustersToAvoid() != null){
@ -257,7 +261,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Removing from the clusterId list these clusters that are disabled: "+ disabledClusters);
}
clusterList.removeAll(disabledClusters);
clusterList.removeAll(disabledClusters);
}
DeployDestination dest = checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc, _allocationAlgorithm);
@ -280,7 +284,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}else{
//consider all clusters under this zone.
s_logger.debug("Searching all possible resources under this Zone: "+ plan.getDataCenterId());
//list clusters under this zone by cpu and ram capacity
//list clusters under this zone by cpu and ram capacity
List<Long> prioritizedClusterIds = listClustersByCapacity(plan.getDataCenterId(), cpu_requested, ram_requested, avoid, true, cpuOverprovisioningFactor);
if(!prioritizedClusterIds.isEmpty()){
if(avoid.getClustersToAvoid() != null){
@ -303,7 +307,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
return null;
}
if(!prioritizedClusterIds.isEmpty()){
boolean applyUserConcentrationPodHeuristic = Boolean.parseBoolean(_configDao.getValue(Config.UseUserConcentratedPodAllocation.key()));
boolean applyUserConcentrationPodHeuristic = Boolean.parseBoolean(_configDao.getValue(Config.UseUserConcentratedPodAllocation.key()));
if(applyUserConcentrationPodHeuristic && vmProfile.getOwner() != null){
//user has VMs in certain pods. - prioritize those pods first
//UserConcentratedPod strategy
@ -398,7 +402,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
Pod pod = _podDao.findById(clusterVO.getPodId());
Host host = _hostDao.findById(potentialResources.first().getId());
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
// remove the reused vol<->pool from destination, since we don't have to prepare this volume.
// remove the reused vol<->pool from destination, since we don't have to prepare this volume.
for(Volume vol : readyAndReusedVolumes){
storageVolMap.remove(vol);
}
@ -465,13 +469,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}
return prioritizedPods;
}
}
protected List<Long> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){
//look at the aggregate available cpu and ram per cluster
//although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot
//we need clusters having enough cpu AND RAM
//we need clusters having enough cpu AND RAM
if (s_logger.isDebugEnabled()) {
s_logger.debug("Listing clusters that have enough aggregate CPU and RAM capacity under this "+(isZone ? "Zone: " : "Pod: " )+id);
}
@ -544,7 +548,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo);
if (suitableHosts != null && !suitableHosts.isEmpty()) {
break;
}
}
}
if(suitableHosts.isEmpty()){
@ -574,7 +578,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
long exstPoolDcId = pool.getDataCenterId();
Long exstPoolPodId = pool.getPodId();
Long exstPoolClusterId = pool.getClusterId();
if(plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId){
if(plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId){
s_logger.debug("Planner need not allocate a pool for this volume since its READY");
suitablePools.add(pool);
suitableVolumeStoragePools.put(toBeCreated, suitablePools);
@ -636,7 +640,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}
return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
}
}
@Override

View File

@ -50,13 +50,11 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean {
private static final String SELECT_SQL = "SELECT op_lock.key, mac, ip, thread, acquired_on, waiters FROM op_lock";
private static final String INQUIRE_SQL = SELECT_SQL + " WHERE op_lock.key=?";
private static final String DECREMENT_SQL = "UPDATE op_lock SET waiters=waiters-1 where op_lock.key=? AND op_lock.mac=? AND op_lock.ip=? AND op_lock.thread=?";
private static final String RELEASE_SQL = "DELETE FROM op_lock WHERE op_lock.key = ? AND op_lock.mac=? AND waiters=0";
private static final String RELEASE_LOCK_SQL = "DELETE FROM op_lock WHERE op_lock.key = ?";
private static final String RELEASE_SQL = RELEASE_LOCK_SQL + " AND op_lock.mac=? AND waiters=0";
private static final String CLEANUP_MGMT_LOCKS_SQL = "DELETE FROM op_lock WHERE op_lock.mac = ?";
private static final String SELECT_MGMT_LOCKS_SQL = SELECT_SQL + " WHERE mac=?";
private static final String SELECT_THREAD_LOCKS_SQL = SELECT_SQL + " WHERE mac=? AND ip=?";
private static final String SELECT_OWNER_SQL = "SELECT mac, ip, thread FROM op_lock WHERE op_lock.key=?";
private static final String DEADLOCK_DETECT_SQL = "SELECT l2.key FROM op_lock l2 WHERE l2.mac=? AND l2.ip=? AND l2.thread=? AND l2.key in " +
"(SELECT l1.key from op_lock l1 WHERE l1.mac=? AND l1.ip=? AND l1.thread=?)";
private static final String CLEANUP_THREAD_LOCKS_SQL = "DELETE FROM op_lock WHERE mac=? AND ip=? AND thread=?";
TimeZone s_gmtTimeZone = TimeZone.getTimeZone("GMT");
@ -64,19 +62,16 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean {
private long _msId;
private static Merovingian2 s_instance = null;
ScheduledExecutorService _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("LockMasterConnectionKeepAlive"));
Connection _conn;
ScheduledExecutorService _executor = null;
Connection _conn = null;
private Merovingian2(long msId) {
super(MerovingianMBean.class, false);
_msId = msId;
try {
_conn = Transaction.getStandaloneConnectionWithException();
_conn.setAutoCommit(true);
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to initialize a connection to the database for locking purposes", e);
String result = resetDbConnection();
if (!result.equalsIgnoreCase("Success")) {
throw new CloudRuntimeException("Unable to initialize a connection to the database for locking purposes due to " + result);
}
_executor.schedule(new KeepAliveTask(), 10, TimeUnit.SECONDS);
}
public static synchronized Merovingian2 createLockMaster(long msId) {
@ -95,6 +90,42 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean {
return s_instance;
}
@Override
public String resetDbConnection() {
    // Re-establishes the dedicated standalone DB connection used for the lock
    // table, and restarts the keep-alive task against it. Returns "Success" on
    // success (callers compare with equalsIgnoreCase), otherwise an error message.

    // Best-effort close of the old connection; a failure here must not stop us
    // from opening a fresh one.
    if (_conn != null) {
        try {
            _conn.close();
        } catch (Throwable th) {
            s_logger.error("Unable to close connection", th);
        }
    }

    try {
        _conn = Transaction.getStandaloneConnectionWithException();
        if (_conn == null) {
            // Guard before first use: previously this check came after
            // setAutoCommit(true), which would have thrown an NPE first.
            return "Unable to initialize a connection to the database for locking purposes, shutdown this server!";
        }
        // Lock acquire/release must take effect immediately, not inside a txn.
        _conn.setAutoCommit(true);
    } catch (SQLException e) {
        s_logger.error("Unable to get a new db connection", e);
        return "Unable to initialize a connection to the database for locking purposes: " + e;
    }

    // Replace the keep-alive executor so the task pings the new connection.
    if (_executor != null) {
        try {
            _executor.shutdown();
        } catch (Throwable th) {
            s_logger.error("Unable to shutdown the executor", th);
        }
    }
    _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("LockMasterConnectionKeepAlive"));
    _executor.schedule(new KeepAliveTask(), 10, TimeUnit.SECONDS);
    return "Success";
}
public boolean acquire(String key, int timeInSeconds) {
Thread th = Thread.currentThread();
String threadName = th.getName();
@ -222,7 +253,8 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean {
public void cleanupThisServer() {
cleanupForServer(_msId);
}
@Override
public void cleanupForServer(long msId) {
PreparedStatement pstmt = null;
try {
@ -397,6 +429,20 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean {
}
}
@Override
public boolean releaseLockAsLastResortAndIReallyKnowWhatIAmDoing(String key) {
    // Forcibly deletes the lock row for this key regardless of owner or waiter
    // count. Exposed over JMX for operator intervention only — hence the name.
    // Returns true if a row was actually removed.
    PreparedStatement pstmt = null;
    try {
        pstmt = _conn.prepareStatement(RELEASE_LOCK_SQL);
        pstmt.setString(1, key);
        int rows = pstmt.executeUpdate();
        return rows > 0;
    } catch (SQLException e) {
        s_logger.error("Unable to release lock " + key, e);
        return false;
    } finally {
        // The statement previously leaked on every call; always close it.
        if (pstmt != null) {
            try {
                pstmt.close();
            } catch (SQLException e) {
                s_logger.error("Unable to close statement for lock " + key, e);
            }
        }
    }
}
protected class KeepAliveTask implements Runnable {
@Override
public void run() {
@ -416,4 +462,5 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean {
}
}
}
}

View File

@ -23,8 +23,13 @@ import java.util.Map;
/**
 * JMX-exposed operations for inspecting and manipulating the op_lock table.
 */
public interface MerovingianMBean {

    /** Lists every row currently in the lock table. */
    List<Map<String, String>> getAllLocks();

    /** Lists only the locks held by this management server. */
    List<Map<String, String>> getLocksAcquiredByThisServer();

    /**
     * Forcibly removes the lock row for the given key, ignoring owner and
     * waiters. Last-resort operator tool; returns true if a row was deleted.
     */
    boolean releaseLockAsLastResortAndIReallyKnowWhatIAmDoing(String key);

    /**
     * Re-establishes the dedicated DB connection used for locking.
     * Returns "Success" on success, otherwise an error description.
     */
    String resetDbConnection();

    /** Removes all locks owned by the management server with the given id. */
    void cleanupForServer(long msId);
}