Storage pool response improvements (#10740)

* Return details of the storage pool in the response, including the url, and update capacityBytes and capacityIops, if applicable, while creating a storage pool

* Added a capacitybytes parameter to the storage pool response, in sync with the capacityiops response parameter and the createStoragePool cmd request parameter (the existing disksizetotal parameter in the storage pool response can be deprecated)

* Don't keep url in details

* Persist the capacityBytes and capacityIops in the storage_pool_details table while creating a storage pool as well, for consistency, since these details are already updated there during update storage pool

* rebase with main fixes
Suresh Kumar Anaparti, 2025-10-08 11:20:37 +05:30, committed by GitHub
parent d2615bb142
commit 09b63bc2e8
7 changed files with 62 additions and 14 deletions
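
For illustration, a listStoragePools response entry after this change might look like the excerpt below: the new capacitybytes field sits alongside the now-deprecated disksizetotal, which keeps reporting the same value for backward compatibility (all numbers here are made up):

    "storagepool": {
        "capacitybytes": 1099511627776,
        "capacityiops": 50000,
        "disksizetotal": 1099511627776,
        "disksizeallocated": 107374182400,
        "disksizeused": 53687091200,
        ...
    }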

StoragePoolResponse.java

@@ -77,19 +77,24 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
     @Param(description = "the name of the cluster for the storage pool")
     private String clusterName;
 
+    @SerializedName(ApiConstants.CAPACITY_BYTES)
+    @Param(description = "bytes CloudStack can provision from this storage pool", since = "4.22.0")
+    private Long capacityBytes;
+
+    @Deprecated(since = "4.22.0")
     @SerializedName("disksizetotal")
     @Param(description = "the total disk size of the storage pool")
     private Long diskSizeTotal;
 
     @SerializedName("disksizeallocated")
-    @Param(description = "the host's currently allocated disk size")
+    @Param(description = "the pool's currently allocated disk size")
     private Long diskSizeAllocated;
 
     @SerializedName("disksizeused")
-    @Param(description = "the host's currently used disk size")
+    @Param(description = "the pool's currently used disk size")
     private Long diskSizeUsed;
 
-    @SerializedName("capacityiops")
+    @SerializedName(ApiConstants.CAPACITY_IOPS)
     @Param(description = "IOPS CloudStack can provision from this storage pool")
     private Long capacityIops;
@@ -288,6 +293,14 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
         this.clusterName = clusterName;
     }
 
+    public Long getCapacityBytes() {
+        return capacityBytes;
+    }
+
+    public void setCapacityBytes(Long capacityBytes) {
+        this.capacityBytes = capacityBytes;
+    }
+
     public Long getDiskSizeTotal() {
         return diskSizeTotal;
     }
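
@SerializedName controls the JSON key a field is serialized under in the API response; CloudStack's response serialization is Gson-based. A minimal standalone sketch, assuming ApiConstants.CAPACITY_BYTES resolves to "capacitybytes" (the value shown is illustrative):

    // Sketch: Gson's @SerializedName maps the Java field name to the JSON key.
    import com.google.gson.Gson;
    import com.google.gson.annotations.SerializedName;

    public class SerializedNameSketch {
        @SerializedName("capacitybytes")
        private Long capacityBytes = 1099511627776L; // illustrative value

        public static void main(String[] args) {
            // prints: {"capacitybytes":1099511627776}
            System.out.println(new Gson().toJson(new SerializedNameSketch()));
        }
    }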

PrimaryDataStoreLifeCycle.java

@@ -24,8 +24,8 @@ import com.cloud.hypervisor.Hypervisor;
 import com.cloud.storage.StoragePool;
 
 public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
-    public static final String CAPACITY_BYTES = "capacityBytes";
-    public static final String CAPACITY_IOPS = "capacityIops";
+    String CAPACITY_BYTES = "capacityBytes";
+    String CAPACITY_IOPS = "capacityIops";
 
     void updateStoragePool(StoragePool storagePool, Map<String, String> details);
     void enableStoragePool(DataStore store);
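
This cleanup is purely cosmetic: fields declared in a Java interface are implicitly public, static, and final, so the two forms declare exactly the same constant:

    public interface Caps {
        String CAPACITY_BYTES = "capacityBytes";  // implicitly public static final
        // public static final String CAPACITY_BYTES = "capacityBytes";  // identical, just verbose
    }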

PrimaryDataStoreDaoImpl.java

@@ -320,6 +320,9 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
         pool = super.persist(pool);
         if (details != null) {
             for (Map.Entry<String, String> detail : details.entrySet()) {
+                if (detail.getKey().toLowerCase().contains("password") || detail.getKey().toLowerCase().contains("token")) {
+                    displayDetails = false;
+                }
                 StoragePoolDetailVO vo = new StoragePoolDetailVO(pool.getId(), detail.getKey(), detail.getValue(), displayDetails);
                 _detailsDao.persist(vo);
             }
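
The inline check above amounts to a small predicate: a detail row is persisted with display = false whenever its key contains "password" or "token", case-insensitively. A standalone sketch (the helper name is hypothetical, not part of this change):

    // Hypothetical helper equivalent to the inline check above.
    static boolean isDisplayable(String detailKey) {
        String key = detailKey.toLowerCase();
        return !key.contains("password") && !key.contains("token");
    }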

Schema upgrade SQL script

@@ -34,3 +34,7 @@ UPDATE `cloud`.`ldap_configuration` SET uuid = UUID() WHERE uuid IS NULL OR uuid
 -- Add the column cross_zone_instance_creation to cloud.backup_repository. If enabled, new Instances can be created on all Zones from Backups on this Repository.
 CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_repository', 'cross_zone_instance_creation', 'TINYINT(1) DEFAULT NULL COMMENT ''Backup Repository can be used for disaster recovery on another zone''');
+
+-- Set display to false for the password/token details in storage_pool_details
+UPDATE `cloud`.`storage_pool_details` SET display = 0 WHERE name LIKE '%password%';
+UPDATE `cloud`.`storage_pool_details` SET display = 0 WHERE name LIKE '%token%';

PrimaryDataStoreHelper.java

@@ -85,8 +85,7 @@ public class PrimaryDataStoreHelper {
     DataStoreProviderManager dataStoreProviderMgr;
 
     public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) {
-        if(params == null)
-        {
+        if (params == null) {
             throw new InvalidParameterValueException("createPrimaryDataStore: Input params is null, please check");
         }
         StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID(params.getUuid());

CloudStackPrimaryDataStoreLifeCycleImpl.java

@@ -18,6 +18,7 @@
  */
 package org.apache.cloudstack.storage.datastore.lifecycle;
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -139,7 +140,6 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl {
         Long clusterId = (Long)dsInfos.get("clusterId");
         Long podId = (Long)dsInfos.get("podId");
         Long zoneId = (Long)dsInfos.get("zoneId");
-        String url = (String)dsInfos.get("url");
         String providerName = (String)dsInfos.get("providerName");
         HypervisorType hypervisorType = (HypervisorType)dsInfos.get("hypervisorType");
 
         if (clusterId != null && podId == null) {
@@ -148,19 +148,43 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl {
         PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
 
+        String tags = (String)dsInfos.get("tags");
+        String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
+        Map<String, String> details = (Map<String, String>)dsInfos.get("details");
+
+        if (dsInfos.get("capacityBytes") != null) {
+            Long capacityBytes = (Long)dsInfos.get("capacityBytes");
+            if (capacityBytes <= 0) {
+                throw new IllegalArgumentException("'capacityBytes' must be greater than 0.");
+            }
+            if (details == null) {
+                details = new HashMap<>();
+            }
+            details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, String.valueOf(capacityBytes));
+            parameters.setCapacityBytes(capacityBytes);
+        }
+
+        if (dsInfos.get("capacityIops") != null) {
+            Long capacityIops = (Long)dsInfos.get("capacityIops");
+            if (capacityIops <= 0) {
+                throw new IllegalArgumentException("'capacityIops' must be greater than 0.");
+            }
+            if (details == null) {
+                details = new HashMap<>();
+            }
+            details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, String.valueOf(capacityIops));
+            parameters.setCapacityIops(capacityIops);
+        }
+
+        parameters.setTags(tags);
+        parameters.setStorageAccessGroups(storageAccessGroups);
+        parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule"));
+        parameters.setDetails(details);
-        String tags = (String)dsInfos.get("tags");
-        parameters.setTags(tags);
-        parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule"));
-        String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
-        parameters.setStorageAccessGroups(storageAccessGroups);
 
         String scheme = dsInfos.get("scheme").toString();
         String storageHost = dsInfos.get("host").toString();
         String hostPath = dsInfos.get("hostPath").toString();
         String uri = String.format("%s://%s%s", scheme, storageHost, hostPath);
 
         Object localStorage = dsInfos.get("localStorage");
         if (localStorage != null) {
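
The two capacity blocks above repeat the same validate-then-record pattern: reject non-positive values, lazily create the details map, then mirror the value into both the details and the PrimaryDataStoreParameters. A condensed sketch of that pattern (the helper is hypothetical, not part of this change):

    // Hypothetical condensation of the repeated pattern above.
    private static Map<String, String> putCapacity(Map<String, String> details, String key, Long value) {
        if (value <= 0) {
            throw new IllegalArgumentException(String.format("'%s' must be greater than 0.", key));
        }
        if (details == null) {
            details = new HashMap<>();
        }
        details.put(key, String.valueOf(value));
        return details;
    }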

StoragePoolJoinDaoImpl.java

@@ -40,6 +40,7 @@ import org.springframework.stereotype.Component;
 import com.cloud.api.ApiDBUtils;
 import com.cloud.api.query.vo.StoragePoolJoinVO;
 import com.cloud.capacity.CapacityManager;
+import com.cloud.server.ResourceTag;
 import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.ScopeType;
 import com.cloud.storage.Storage;
@@ -152,6 +153,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Long>
                 }
             }
         }
+        poolResponse.setCapacityBytes(pool.getCapacityBytes());
         poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
         poolResponse.setDiskSizeAllocated(allocatedSize);
         poolResponse.setDiskSizeUsed(pool.getUsedBytes());
@@ -180,6 +182,8 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Long>
         poolResponse.setIsTagARule(pool.getIsTagARule());
         poolResponse.setOverProvisionFactor(Double.toString(CapacityManager.StorageOverprovisioningFactor.valueIn(pool.getId())));
         poolResponse.setManaged(storagePool.isManaged());
+        Map<String, String> details = ApiDBUtils.getResourceDetails(pool.getId(), ResourceTag.ResourceObjectType.Storage);
+        poolResponse.setDetails(details);
 
         // set async job
         if (pool.getJobId() != null) {
@@ -252,6 +256,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Long>
         }
 
         long allocatedSize = pool.getUsedCapacity();
+        poolResponse.setCapacityBytes(pool.getCapacityBytes());
         poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
         poolResponse.setDiskSizeAllocated(allocatedSize);
         poolResponse.setCapacityIops(pool.getCapacityIops());
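
Because disksizetotal is deprecated rather than removed, both it and capacitybytes report pool.getCapacityBytes(), so existing clients keep working unchanged. A client migrating to the new field could read it with a fallback (hypothetical helper, assuming the response entry has been parsed into a Map):

    // Hypothetical client-side helper: prefer the new key, fall back to the
    // deprecated one when talking to a pre-4.22.0 management server.
    static Long totalBytes(java.util.Map<String, Object> poolEntry) {
        Object v = poolEntry.getOrDefault("capacitybytes", poolEntry.get("disksizetotal"));
        return v == null ? null : ((Number) v).longValue();
    }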