Mirror of https://github.com/apache/cloudstack.git
bug 6181: ignore provisioning factor for iSCSI primary storage
status 6181: resolved fixed
parent 6f92235365
commit abaa66f6e3
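
In short, this change moves storage-capacity accounting into StorageManager.createCapacityEntry() and applies storage.overprovisioning.factor only to NetworkFilesystem pools, so iSCSI (and other non-NFS) primary storage reports its raw capacity for CAPACITY_TYPE_STORAGE_ALLOCATED. A minimal sketch of that rule follows; the class, enum, and method names here are illustrative stand-ins, not the CloudStack types themselves:

    // Sketch of the provisioning-factor rule this commit introduces.
    // PoolType and computeAllocatedTotal are hypothetical names for illustration only.
    public class ProvisioningFactorSketch {

        enum PoolType { NetworkFilesystem, IscsiLUN, LVM }

        // The over-provisioning factor is honored for NFS pools only;
        // every other pool type uses a factor of 1, i.e. its raw capacity.
        static long computeAllocatedTotal(PoolType type, long capacityBytes, int overProvisioningFactor) {
            int provFactor = (type == PoolType.NetworkFilesystem) ? overProvisioningFactor : 1;
            return capacityBytes * provFactor;
        }

        public static void main(String[] args) {
            long capacity = 100L * 1024 * 1024 * 1024; // 100 GiB pool
            // NFS pool with factor 2 -> 200 GiB of allocatable capacity reported.
            System.out.println(computeAllocatedTotal(PoolType.NetworkFilesystem, capacity, 2));
            // iSCSI pool with the same setting -> factor ignored, 100 GiB reported.
            System.out.println(computeAllocatedTotal(PoolType.IscsiLUN, capacity, 2));
        }
    }
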
@@ -22,7 +22,6 @@ import com.cloud.capacity.CapacityVO;
 import com.cloud.utils.db.GenericDao;
 
 public interface CapacityDao extends GenericDao<CapacityVO, Long> {
-    void setUsedStorage(Long hostId, long totalUsed);
     void clearNonStorageCapacities();
     void clearStorageCapacities();
 }
@@ -34,9 +34,8 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
 
     private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?";
     private static final String SUBTRACT_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity - ? WHERE host_id = ? AND capacity_type = ?";
-    private static final String SET_USED_STORAGE_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = ? WHERE host_id = ? AND capacity_type = 2";
-    private static final String CLEAR_STORAGE_CAPACITIES = "DELETE FROM `cloud`.`op_host_capacity` WHERE capacity_type=2 OR capacity_type=6"; //clear storage and secondary_storage capacities
-    private static final String CLEAR_NON_STORAGE_CAPACITIES = "DELETE FROM `cloud`.`op_host_capacity` WHERE capacity_type<>2 AND capacity_type <>6"; //clear non-storage and non-secondary_storage capacities
+    private static final String CLEAR_STORAGE_CAPACITIES = "DELETE FROM `cloud`.`op_host_capacity` WHERE capacity_type=2 OR capacity_type=3 OR capacity_type=6"; //clear storage and secondary_storage capacities
+    private static final String CLEAR_NON_STORAGE_CAPACITIES = "DELETE FROM `cloud`.`op_host_capacity` WHERE capacity_type<>2 AND capacity_type<>3 AND capacity_type<>6"; //clear non-storage and non-secondary_storage capacities
 
     public void updateAllocated(Long hostId, long allocatedAmount, short capacityType, boolean add) {
         Transaction txn = Transaction.currentTxn();
@@ -61,22 +60,6 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
         }
     }
 
-    public void setUsedStorage(Long hostId, long totalUsed) {
-        Transaction txn = Transaction.currentTxn();
-        PreparedStatement pstmt = null;
-        try {
-            txn.start();
-            String sql = SET_USED_STORAGE_SQL;
-            pstmt = txn.prepareAutoCloseStatement(sql);
-            pstmt.setLong(1, totalUsed);
-            pstmt.setLong(2, hostId);
-            pstmt.executeUpdate(); // TODO: Make sure exactly 1 row was updated?
-            txn.commit();
-        } catch (Exception e) {
-            txn.rollback();
-            s_logger.warn("Exception setting used storage for host: " + hostId, e);
-        }
-    }
 
     @Override
     public void clearNonStorageCapacities() {
@@ -312,4 +312,6 @@ public interface StorageManager extends Manager {
 
     <T extends VMInstanceVO> void create(T vm);
+    Long findHostIdForStoragePool(StoragePoolVO pool);
+    void createCapacityEntry(StoragePoolVO storagePool, long allocated);
 
 }
@@ -59,12 +59,14 @@ import com.cloud.offering.ServiceOffering;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePoolVO;
 import com.cloud.storage.dao.StoragePoolDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.component.ComponentLocator;
+import com.cloud.utils.component.Inject;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.Transaction;
@@ -93,7 +95,8 @@ public class AlertManagerImpl implements AlertManager {
     private String _name = null;
     private EmailAlert _emailAlert;
     private AlertDao _alertDao;
     private HostDao _hostDao;
+    @Inject protected StorageManager _storageMgr;
     private ServiceOfferingDao _offeringsDao;
     private CapacityDao _capacityDao;
     private VMInstanceDao _vmDao;
@@ -109,7 +112,6 @@ public class AlertManagerImpl implements AlertManager {
     private StoragePoolDao _storagePoolDao;
 
     private Timer _timer = null;
-    private int _overProvisioningFactor = 1;
     private float _cpuOverProvisioningFactor = 1;
     private long _capacityCheckPeriod = 60L * 60L * 1000L; // one hour by default
     private double _memoryCapacityThreshold = 0.75;
@@ -258,11 +260,6 @@ public class AlertManagerImpl implements AlertManager {
         if (capacityCheckPeriodStr != null) {
             _capacityCheckPeriod = Long.parseLong(capacityCheckPeriodStr);
         }
 
-        String overProvisioningFactorStr = configs.get("storage.overprovisioning.factor");
-        if (overProvisioningFactorStr != null) {
-            _overProvisioningFactor = Integer.parseInt(overProvisioningFactorStr);
-        }
-
         String cpuOverProvisioningFactorStr = configs.get("cpu.overprovisioning.factor");
         if (cpuOverProvisioningFactorStr != null) {
@@ -399,14 +396,7 @@ public class AlertManagerImpl implements AlertManager {
             long disk = 0l;
             Pair<Long, Long> sizes = _volumeDao.getCountAndTotalByPool(pool.getId());
             disk = sizes.second();
-            int provFactor = 1;
-            if( pool.getPoolType() == StoragePoolType.NetworkFilesystem ) {
-                provFactor = _overProvisioningFactor;
-            }
-            CapacityVO newStorageCapacity = new CapacityVO(pool.getId(), pool.getDataCenterId(), pool.getPodId(), disk, pool.getCapacityBytes() * provFactor, CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
-            newCapacities.add(newStorageCapacity);
-
-            continue;
+            _storageMgr.createCapacityEntry(pool, disk);
         }
 
         // Calculate new Public IP capacity
@@ -331,9 +331,7 @@ public class StatsCollector {
             pool.setAvailableBytes(available);
             _storagePoolDao.update(pool.getId(), pool);
 
-            CapacityVO capacity = new CapacityVO(poolId, pool.getDataCenterId(), pool.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
-            newCapacities.add(capacity);
-            // _capacityDao.persist(capacity);
+            _storageManager.createCapacityEntry(pool, 0L);
         }
 
         Transaction txn = Transaction.open(Transaction.CLOUD_DB);
@@ -342,8 +340,6 @@ public class StatsCollector {
             s_logger.trace("recalculating system storage capacity");
         }
         txn.start();
-        _capacityDao.clearStorageCapacities();
-
         for (CapacityVO newCapacity : newCapacities) {
             s_logger.trace("Executing capacity update");
             _capacityDao.persist(newCapacity);
@@ -1062,13 +1062,14 @@ public class StorageManagerImpl implements StorageManager {
 
         _discoverers = locator.getAdapters(StoragePoolDiscoverer.class);
 
-        String overProvisioningFactorStr = (String) params.get("storage.overprovisioning.factor");
+        Map<String, String> configs = configDao.getConfiguration("management-server", params);
+
+        String overProvisioningFactorStr = configs.get("storage.overprovisioning.factor");
         if (overProvisioningFactorStr != null) {
             _overProvisioningFactor = Integer.parseInt(overProvisioningFactorStr);
         }
 
-        Map<String, String> configs = configDao.getConfiguration("management-server", params);
 
         _retry = NumbersUtil.parseInt(configs.get(Config.StartRetry.key()), 2);
         _pingInterval = NumbersUtil.parseInt(configs.get("ping.interval"), 60);
         _hostRetry = NumbersUtil.parseInt(configs.get("host.retry"), 2);
@@ -1725,10 +1726,15 @@ public class StorageManagerImpl implements StorageManager {
 
         txn.commit();
     }
 
     @Override
     public void createCapacityEntry(StoragePoolVO storagePool) {
-        SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
+        createCapacityEntry(storagePool, 0);
+    }
+
+    @Override
+    public void createCapacityEntry(StoragePoolVO storagePool, long allocated) {
+        SearchCriteria capacitySC = _capacityDao.createSearchCriteria();
         capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, storagePool.getId());
         capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, storagePool.getDataCenterId());
         capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_STORAGE);
@@ -1736,15 +1742,14 @@ public class StorageManagerImpl implements StorageManager {
         List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
 
         if (capacities.size() == 0) {
-            CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), 0L, storagePool.getCapacityBytes(),
+            CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getAvailableBytes(), storagePool.getCapacityBytes(),
                     CapacityVO.CAPACITY_TYPE_STORAGE);
             _capacityDao.persist(capacity);
         } else {
             CapacityVO capacity = capacities.get(0);
-            if (capacity.getTotalCapacity() != storagePool.getCapacityBytes()) {
-                capacity.setTotalCapacity(storagePool.getCapacityBytes());
-                _capacityDao.update(capacity.getId(), capacity);
-            }
+            capacity.setTotalCapacity(storagePool.getCapacityBytes());
+            capacity.setUsedCapacity(storagePool.getAvailableBytes());
+            _capacityDao.update(capacity.getId(), capacity);
         }
         s_logger.debug("Successfully set Capacity - " +storagePool.getCapacityBytes()+ " for CAPACITY_TYPE_STORAGE, DataCenterId - " +storagePool.getDataCenterId()+ ", HostOrPoolId - " +storagePool.getId()+ ", PodId " +storagePool.getPodId());
         capacitySC = _capacityDao.createSearchCriteria();
@@ -1754,21 +1759,28 @@ public class StorageManagerImpl implements StorageManager {
 
         capacities = _capacityDao.search(capacitySC, null);
 
+        int provFactor = 1;
+        if( storagePool.getPoolType() == StoragePoolType.NetworkFilesystem ) {
+            provFactor = _overProvisioningFactor;
+        }
         if (capacities.size() == 0) {
-            int provFactor = 1;
-            if( storagePool.getPoolType() == StoragePoolType.NetworkFilesystem ) {
-                provFactor = _overProvisioningFactor;
-            }
-
-            CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), 0L, storagePool.getCapacityBytes()
+            CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), allocated, storagePool.getCapacityBytes()
                     * provFactor, CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
             _capacityDao.persist(capacity);
         } else {
             CapacityVO capacity = capacities.get(0);
-            long currCapacity = _overProvisioningFactor * storagePool.getCapacityBytes();
+            long currCapacity = provFactor * storagePool.getCapacityBytes();
+            boolean update = false;
             if (capacity.getTotalCapacity() != currCapacity) {
                 capacity.setTotalCapacity(currCapacity);
-                _capacityDao.update(capacity.getId(), capacity);
+                update = true;
             }
+            if ( allocated != 0 ) {
+                capacity.setUsedCapacity(allocated);
+                update = true;
+            }
+            if ( update ) {
+                _capacityDao.update(capacity.getId(), capacity);
+            }
         }
         s_logger.debug("Successfully set Capacity - " +storagePool.getCapacityBytes()* _overProvisioningFactor+ " for CAPACITY_TYPE_STORAGE_ALLOCATED, DataCenterId - " +storagePool.getDataCenterId()+ ", HostOrPoolId - " +storagePool.getId()+ ", PodId " +storagePool.getPodId());