bug 8887 : Stats Calculation Improvement - Storage stats won't update the DB anymore and will be kept "in memory" just like the other stats. For the listCapacityCmd which consumes them (secondary storage used and primary storage used), the response is now constructed from the in-memory maps rather than from the DB, which will no longer contain secondary-storage-used and primary-storage-used entries.

This commit is contained in:
nit 2011-03-14 18:45:00 -07:00
parent bf1aae8413
commit f88fb1e505
8 changed files with 124 additions and 129 deletions

View File

@ -49,8 +49,8 @@ public class ListCapacityCmd extends BaseListCmd {
@Parameter(name=ApiConstants.POD_ID, type=CommandType.LONG, description="lists capacity by the Pod ID")
private Long podId;
@Parameter(name=ApiConstants.TYPE, type=CommandType.STRING, description="lists capacity by type")
private String type;
@Parameter(name=ApiConstants.TYPE, type=CommandType.INTEGER, description="lists capacity by type")
private Integer type;
@Parameter(name=ApiConstants.ZONE_ID, type=CommandType.LONG, description="lists capacity by the Zone ID")
private Long zoneId;
@ -68,7 +68,7 @@ public class ListCapacityCmd extends BaseListCmd {
return podId;
}
public String getType() {
public Integer getType() {
return type;
}

View File

@ -2837,11 +2837,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory,
StartupStorageCommand ssCmd = (StartupStorageCommand) startup;
if (ssCmd.getResourceType() == Storage.StorageResourceType.STORAGE_HOST) {
CapacityVO capacity = new CapacityVO(server.getId(),
server.getDataCenterId(), server.getPodId(), server.getClusterId(),0L,
server.getTotalSize(), CapacityVO.CAPACITY_TYPE_STORAGE);
_capacityDao.persist(capacity);
capacity = new CapacityVO(server.getId(),
server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L,
server.getTotalSize() * _overProvisioningFactor,
CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);

View File

@ -332,7 +332,8 @@ public class AlertManagerImpl implements AlertManager {
try {
List<CapacityVO> capacityList = _capacityDao.listAllIncludingRemoved();
Map<String, List<CapacityVO>> capacityDcTypeMap = new HashMap<String, List<CapacityVO>>();
Map<String, List<CapacityVO>> capacityDcTypeMap = new HashMap<String, List<CapacityVO>>();
_storageMgr.getStoragePoolUsedStats(null, null, null);
for (CapacityVO capacity : capacityList) {
long dataCenterId = capacity.getDataCenterId();

View File

@ -289,6 +289,10 @@ public class ApiDBUtils {
/**
 * Returns the most recently collected VM statistics for the given id,
 * as held in memory by the stats collector.
 *
 * NOTE(review): the parameter is named {@code hostId} but is forwarded to
 * {@code getVmStats}, which suggests it may actually be a VM id — confirm
 * against the collector's contract.
 *
 * @param hostId id used to look up the cached stats entry
 * @return the cached {@code VmStats}, or whatever the collector returns
 *         when no entry exists (possibly {@code null})
 */
public static VmStats getVmStatistics(long hostId) {
    final VmStats cachedStats = _statsCollector.getVmStats(hostId);
    return cachedStats;
}
/**
 * Returns the in-memory storage statistics for the given secondary
 * storage host id, as cached by the stats collector.
 *
 * @param id secondary storage host id
 * @return the cached {@code StorageStats}, or whatever the collector
 *         returns when no entry exists (possibly {@code null})
 */
public static StorageStats getSecondaryStorageStatistics(long id) {
    final StorageStats cachedStats = _statsCollector.getStorageStats(id);
    return cachedStats;
}
/////////////////////////////////////////////////////////////
// Dao methods //

View File

@ -43,6 +43,7 @@ import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -132,6 +133,7 @@ import com.cloud.async.AsyncJobResult;
import com.cloud.async.AsyncJobVO;
import com.cloud.async.BaseAsyncJobExecutor;
import com.cloud.async.dao.AsyncJobDao;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.certificate.CertificateVO;
@ -3223,18 +3225,15 @@ public class ManagementServerImpl implements ManagementServer {
@Override
public List<CapacityVO> listCapacities(ListCapacityCmd cmd) {
// make sure capacity is accurate before displaying it anywhere
// NOTE: listCapacities is currently called by the UI only, so this
// shouldn't be called much since it checks all hosts/VMs
// to figure out what has been allocated.
Filter searchFilter = new Filter(CapacityVO.class, "capacityType", true, cmd.getStartIndex(), cmd.getPageSizeVal());
SearchCriteria<CapacityVO> sc = _capacityDao.createSearchCriteria();
List<CapacityVO> capacities = new LinkedList<CapacityVO>();
Object type = cmd.getType();
Object zoneId = cmd.getZoneId();
Object podId = cmd.getPodId();
Object hostId = cmd.getHostId();
Integer type = cmd.getType();
Long zoneId = cmd.getZoneId();
Long podId = cmd.getPodId();
Long hostId = cmd.getHostId();
if (type != null) {
sc.addAnd("capacityType", SearchCriteria.Op.EQ, type);
@ -3251,8 +3250,16 @@ public class ManagementServerImpl implements ManagementServer {
if (hostId != null) {
sc.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
}
return _capacityDao.search(sc, searchFilter);
capacities = _capacityDao.search(sc, searchFilter);
// op_host_Capacity contains only allocated stats and the real time stats are stored "in memory".
if (type == null || type == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE){
capacities.addAll(_storageMgr.getSecondaryStorageUsedStats(hostId, podId, zoneId));
}if (type == null || type == Capacity.CAPACITY_TYPE_STORAGE){
capacities.addAll(_storageMgr.getStoragePoolUsedStats(hostId, podId, zoneId));
}
return capacities;
}
@Override

View File

@ -249,112 +249,48 @@ public class StatsCollector {
@Override
public void run() {
try {
SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.Storage.toString());
SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
ConcurrentHashMap<Long, StorageStats> storageStats = new ConcurrentHashMap<Long, StorageStats>();
List<HostVO> hosts = _hostDao.search(sc, null);
for (HostVO host : hosts) {
GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
Answer answer = _agentMgr.easySend(host.getId(), command);
if (answer != null && answer.getResult()) {
storageStats.put(host.getId(), (StorageStats)answer);
}
}
sc = _hostDao.createSearchCriteria();
sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.SecondaryStorage.toString());
hosts = _hostDao.search(sc, null);
for (HostVO host : hosts) {
GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
Answer answer = _agentMgr.easySend(host.getId(), command);
GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
long hostId = host.getId();
Answer answer = _agentMgr.easySend(hostId, command);
if (answer != null && answer.getResult()) {
storageStats.put(host.getId(), (StorageStats)answer);
storageStats.put(hostId, (StorageStats)answer);
//Seems like we have dynamically updated the sec. storage as prev. size and the current do not match
if (_storageStats.get(hostId)!=null &&
_storageStats.get(hostId).getCapacityBytes() != ((StorageStats)answer).getCapacityBytes()){
host.setTotalSize(((StorageStats)answer).getCapacityBytes());
_hostDao.update(hostId, host);
}
}
}
_storageStats = storageStats;
}
_storageStats = storageStats;
ConcurrentHashMap<Long, StorageStats> storagePoolStats = new ConcurrentHashMap<Long, StorageStats>();
List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
for (StoragePoolVO pool: storagePools) {
GetStorageStatsCommand command = new GetStorageStatsCommand(pool.getUuid(), pool.getPoolType(), pool.getPath());
long poolId = pool.getId();
Answer answer = _storageManager.sendToPool(pool, command);
if (answer != null && answer.getResult()) {
storagePoolStats.put(pool.getId(), (StorageStats)answer);
storagePoolStats.put(pool.getId(), (StorageStats)answer);
// Seems like we have dynamically updated the pool size since the prev. size and the current do not match
if (_storagePoolStats.get(poolId)!= null &&
_storagePoolStats.get(poolId).getCapacityBytes() != ((StorageStats)answer).getCapacityBytes()){
pool.setCapacityBytes(((StorageStats)answer).getCapacityBytes());
_storagePoolDao.update(pool.getId(), pool);
}
}
}
_storagePoolStats = storagePoolStats;
// a list to store the new capacity entries that will be committed once everything is calculated
List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();
// Updating the storage entries and creating new ones if they dont exist.
Transaction txn = Transaction.open(Transaction.CLOUD_DB);
try {
if (s_logger.isTraceEnabled()) {
s_logger.trace("recalculating system storage capacity");
}
txn.start();
for (Long hostId : storageStats.keySet()) {
StorageStats stats = storageStats.get(hostId);
short capacityType = -1;
HostVO host = _hostDao.findById(hostId);
host.setTotalSize(stats.getCapacityBytes());
_hostDao.update(host.getId(), host);
SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, host.getDataCenterId());
if (Host.Type.SecondaryStorage.equals(host.getType())) {
capacityType = CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE;
} else if (Host.Type.Storage.equals(host.getType())) {
capacityType = CapacityVO.CAPACITY_TYPE_STORAGE;
}
if(-1 != capacityType){
capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
if (capacities.size() == 0){ // Create a new one
CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), stats.getByteUsed(), stats.getCapacityBytes(), capacityType);
_capacityDao.persist(capacity);
}else{ //Update if it already exists.
CapacityVO capacity = capacities.get(0);
capacity.setUsedCapacity(stats.getByteUsed());
capacity.setTotalCapacity(stats.getCapacityBytes());
_capacityDao.update(capacity.getId(), capacity);
}
}
}// End of for
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("Unable to start transaction for storage capacity update");
}finally {
txn.close();
}
for (Long poolId : storagePoolStats.keySet()) {
StorageStats stats = storagePoolStats.get(poolId);
StoragePoolVO pool = _storagePoolDao.findById(poolId);
if (pool == null) {
continue;
}
pool.setCapacityBytes(stats.getCapacityBytes());
long available = stats.getCapacityBytes() - stats.getByteUsed();
if( available < 0 ) {
available = 0;
}
pool.setAvailableBytes(available);
_storagePoolDao.update(pool.getId(), pool);
_storageManager.createCapacityEntry(pool, 0L);
}
}
_storagePoolStats = storagePoolStats;
} catch (Throwable t) {
s_logger.error("Error trying to retrieve storage stats", t);
}

View File

@ -22,6 +22,7 @@ import java.util.List;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.manager.Commands;
import com.cloud.capacity.CapacityVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.deploy.DeployDestination;
@ -184,4 +185,8 @@ public interface StorageManager extends Manager {
void prepareForMigration(VirtualMachineProfile<? extends VirtualMachine> vm, DeployDestination dest);
Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, Command cmd) throws StorageUnavailableException;
List<CapacityVO> getSecondaryStorageUsedStats(Long hostId, Long podId, Long zoneId);
List<CapacityVO> getStoragePoolUsedStats(Long poolId, Long podId, Long zoneId);
}

View File

@ -29,6 +29,7 @@ import java.util.Formatter;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -63,6 +64,7 @@ import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.agent.api.to.VolumeTO;
import com.cloud.agent.manager.Commands;
import com.cloud.alert.AlertManager;
import com.cloud.api.ApiDBUtils;
import com.cloud.api.commands.CancelPrimaryStorageMaintenanceCmd;
import com.cloud.api.commands.CreateStoragePoolCmd;
import com.cloud.api.commands.CreateVolumeCmd;
@ -72,6 +74,7 @@ import com.cloud.api.commands.PreparePrimaryStorageForMaintenanceCmd;
import com.cloud.api.commands.UpdateStoragePoolCmd;
import com.cloud.async.AsyncInstanceCreateStatus;
import com.cloud.async.AsyncJobManager;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.cluster.ClusterManagerListener;
@ -1587,29 +1590,9 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag
@Override
public void createCapacityEntry(StoragePoolVO storagePool, long allocated) {
SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, storagePool.getId());
capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, storagePool.getDataCenterId());
capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_STORAGE);
SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
if (capacities.size() == 0) {
CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(),
storagePool.getAvailableBytes(), storagePool.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
_capacityDao.persist(capacity);
} else {
CapacityVO capacity = capacities.get(0);
capacity.setTotalCapacity(storagePool.getCapacityBytes());
long used = storagePool.getCapacityBytes() - storagePool.getAvailableBytes();
if (used <= 0) {
used = 0;
}
capacity.setUsedCapacity(used);
_capacityDao.update(capacity.getId(), capacity);
}
s_logger.debug("Successfully set Capacity - " + storagePool.getCapacityBytes() + " for CAPACITY_TYPE_STORAGE, DataCenterId - "
+ storagePool.getDataCenterId() + ", HostOrPoolId - " + storagePool.getId() + ", PodId " + storagePool.getPodId());
List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
capacitySC = _capacityDao.createSearchCriteria();
capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, storagePool.getId());
capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, storagePool.getDataCenterId());
@ -2711,5 +2694,69 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag
}
}
}
@Override
public List<CapacityVO> getSecondaryStorageUsedStats(Long hostId, Long podId, Long zoneId) {
    // Find the hosts matching the optional zone/pod/host filters, then build
    // one in-memory CapacityVO per host from the stats collector's cache.
    SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
    if (zoneId != null) {
        sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId);
    }
    if (podId != null) {
        sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
    }
    if (hostId != null) {
        // The host table is keyed by "id"; "hostOrPoolId" is a field of the
        // capacity table and is not a searchable attribute of HostVO.
        sc.addAnd("id", SearchCriteria.Op.EQ, hostId);
    }

    List<HostVO> hosts = _hostDao.search(sc, null);
    List<CapacityVO> capacities = new LinkedList<CapacityVO>();
    for (HostVO host : hosts) {
        // Stats are collected asynchronously; a host may not have an entry yet.
        StorageStats stats = ApiDBUtils.getSecondaryStorageStatistics(host.getId());
        if (stats == null) {
            continue;
        }
        CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(),
                host.getClusterId(), stats.getByteUsed(), stats.getCapacityBytes(),
                Capacity.CAPACITY_TYPE_SECONDARY_STORAGE);
        capacities.add(capacity);
    }
    return capacities;
}
@Override
public List<CapacityVO> getStoragePoolUsedStats(Long poolId, Long podId, Long zoneId) {
    // Resolve the set of pools to report on, then build one in-memory
    // CapacityVO per pool from the stats collector's cache.
    List<StoragePoolVO> pools;
    if (poolId != null) {
        // Direct lookup by id; findById may return null (e.g. pool removed),
        // which previously produced a null element and an NPE in the loop below.
        pools = new ArrayList<StoragePoolVO>();
        StoragePoolVO pool = _storagePoolDao.findById(poolId);
        if (pool != null) {
            pools.add(pool);
        }
    } else {
        SearchCriteria<StoragePoolVO> sc = _storagePoolDao.createSearchCriteria();
        if (zoneId != null) {
            sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId);
        }
        if (podId != null) {
            sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
        }
        pools = _storagePoolDao.search(sc, null);
    }

    List<CapacityVO> capacities = new LinkedList<CapacityVO>();
    for (StoragePoolVO storagePool : pools) {
        // Stats are collected asynchronously; a pool may not have an entry yet.
        StorageStats stats = ApiDBUtils.getStoragePoolStatistics(storagePool.getId());
        if (stats == null) {
            continue;
        }
        CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(),
                storagePool.getPodId(), storagePool.getClusterId(),
                stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
        capacities.add(capacity);
    }
    return capacities;
}
}