Merge branch '4.16' into main

Suresh Kumar Anaparti 2022-01-06 17:47:37 +05:30
commit 5c02f6d507
GPG Key ID: D7CEAE3A9E71D0AA
10 changed files with 135 additions and 44 deletions

View File

@@ -33,6 +33,8 @@ public interface DataObject {
Long getSize();
long getPhysicalSize();
DataObjectType getType();
String getUuid();
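The hunk above adds long getPhysicalSize() alongside the existing Long getSize(), giving callers the on-disk footprint of an object rather than its (possibly null) virtual size. A minimal usage sketch follows; the class and method names are illustrative and not part of the commit.

import java.util.Comparator;
import java.util.List;

import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;

public class PhysicalSizeOrdering {
    // Orders migration candidates by their on-disk footprint, largest first.
    // getPhysicalSize() is the accessor introduced by this change; getSize()
    // remains the virtual size and may be null for some object types.
    public static void largestFirst(List<DataObject> files) {
        files.sort(Comparator.comparingLong(DataObject::getPhysicalSize).reversed());
    }
}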

View File

@@ -39,5 +39,6 @@ public interface SecondaryStorageService {
}
}
AsyncCallFuture<DataObjectResult> migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain);
AsyncCallFuture<DataObjectResult> migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain,
Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain);
}
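The widened migrateData() signature mirrors the existing snapshot-chain parameter with a template-chain map. Below is a caller sketch under the assumption that the object has no dependent chain, so both maps are passed empty; the wrapper class and method name are illustrative, and Pair is CloudStack's com.cloud.utils.Pair.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService;
import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.framework.async.AsyncCallFuture;

import com.cloud.utils.Pair;

public class MigrateDataCallSketch {
    // Migrates a single object that has no dependent snapshots or child templates,
    // so both chain maps are simply passed empty.
    public static AsyncCallFuture<DataObjectResult> migrateStandaloneObject(
            SecondaryStorageService service, DataObject src, DataStore from, DataStore to) {
        Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain = new HashMap<>();
        Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain = new HashMap<>();
        return service.migrateData(src, from, to, snapshotChain, templateChain);
    }
}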

View File

@@ -32,9 +32,11 @@ import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.ImageStoreService;
@@ -59,8 +61,11 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.SecondaryStorageVmVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.SecondaryStorageVmDao;
import org.apache.log4j.Logger;
public class DataMigrationUtility {
private static Logger LOGGER = Logger.getLogger(DataMigrationUtility.class);
@Inject
SecondaryStorageVmDao secStorageVmDao;
@Inject
@@ -87,19 +92,22 @@ public class DataMigrationUtility {
* the migration is terminated.
*/
private boolean filesReadyToMigrate(Long srcDataStoreId) {
String[] validStates = new String[]{"Ready", "Allocated", "Destroying", "Destroyed", "Failed"};
State[] validStates = {State.Ready, State.Allocated, State.Destroying, State.Destroyed, State.Failed};
boolean isReady = true;
List<TemplateDataStoreVO> templates = templateDataStoreDao.listByStoreId(srcDataStoreId);
for (TemplateDataStoreVO template : templates) {
isReady &= (Arrays.asList(validStates).contains(template.getState().toString()));
isReady &= (Arrays.asList(validStates).contains(template.getState()));
LOGGER.trace(String.format("template state: %s", template.getState()));
}
List<SnapshotDataStoreVO> snapshots = snapshotDataStoreDao.listByStoreId(srcDataStoreId, DataStoreRole.Image);
for (SnapshotDataStoreVO snapshot : snapshots) {
isReady &= (Arrays.asList(validStates).contains(snapshot.getState().toString()));
isReady &= (Arrays.asList(validStates).contains(snapshot.getState()));
LOGGER.trace(String.format("snapshot state: %s", snapshot.getState()));
}
List<VolumeDataStoreVO> volumes = volumeDataStoreDao.listByStoreId(srcDataStoreId);
for (VolumeDataStoreVO volume : volumes) {
isReady &= (Arrays.asList(validStates).contains(volume.getState().toString()));
isReady &= (Arrays.asList(validStates).contains(volume.getState()));
LOGGER.trace(String.format("volume state: %s", volume.getState()));
}
return isReady;
}
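The readiness check now compares ObjectInDataStoreStateMachine.State enum values instead of their string forms. An equivalent formulation using EnumSet is sketched below purely as an illustration of the same check; the class and constant names are not from the commit, which keeps the array plus Arrays.asList() form.

import java.util.EnumSet;
import java.util.Set;

import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;

public class ReadyStateCheck {
    // States in which a template/snapshot/volume entry does not block migration.
    private static final Set<State> MIGRATABLE_STATES = EnumSet.of(
            State.Ready, State.Allocated, State.Destroying, State.Destroyed, State.Failed);

    public static boolean isMigratable(State state) {
        // Comparing enum constants avoids the toString() round-trip of the old string check.
        return MIGRATABLE_STATES.contains(state);
    }
}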
@@ -113,12 +121,17 @@ public class DataMigrationUtility {
return;
}
protected Long getFileSize(DataObject file, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain) {
Long size = file.getSize();
protected Long getFileSize(DataObject file, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain, Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain) {
Long size = file.getPhysicalSize();
Pair<List<SnapshotInfo>, Long> chain = snapshotChain.get(file);
Pair<List<TemplateInfo>, Long> tempChain = templateChain.get(file);
if (file instanceof SnapshotInfo && chain.first() != null && !chain.first().isEmpty()) {
size = chain.second();
}
if (file instanceof TemplateInfo && tempChain.first() != null && !tempChain.first().isEmpty()) {
size = tempChain.second();
}
return size;
}
@@ -144,9 +157,10 @@ public class DataMigrationUtility {
return new ArrayList<>(temp.keySet());
}
protected List<DataObject> getSortedValidSourcesList(DataStore srcDataStore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains) {
protected List<DataObject> getSortedValidSourcesList(DataStore srcDataStore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains,
Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates) {
List<DataObject> files = new ArrayList<>();
files.addAll(getAllReadyTemplates(srcDataStore));
files.addAll(getAllReadyTemplates(srcDataStore, childTemplates));
files.addAll(getAllReadySnapshotsAndChains(srcDataStore, snapshotChains));
files.addAll(getAllReadyVolumes(srcDataStore));
@@ -159,8 +173,8 @@ public class DataMigrationUtility {
Collections.sort(files, new Comparator<DataObject>() {
@Override
public int compare(DataObject o1, DataObject o2) {
Long size1 = o1.getSize();
Long size2 = o2.getSize();
Long size1 = o1.getPhysicalSize();
Long size2 = o2.getPhysicalSize();
if (o1 instanceof SnapshotInfo) {
size1 = snapshotChains.get(o1).second();
}
@@ -173,19 +187,28 @@ public class DataMigrationUtility {
return files;
}
protected List<DataObject> getAllReadyTemplates(DataStore srcDataStore) {
protected List<DataObject> getAllReadyTemplates(DataStore srcDataStore, Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates) {
List<DataObject> files = new LinkedList<>();
List<TemplateInfo> files = new LinkedList<>();
List<TemplateDataStoreVO> templates = templateDataStoreDao.listByStoreId(srcDataStore.getId());
for (TemplateDataStoreVO template : templates) {
VMTemplateVO templateVO = templateDao.findById(template.getTemplateId());
if (template.getState() == ObjectInDataStoreStateMachine.State.Ready && templateVO != null &&
(!templateVO.isPublicTemplate() || (templateVO.isPublicTemplate() && templateVO.getUrl() == null)) &&
templateVO.getHypervisorType() != Hypervisor.HypervisorType.Simulator) {
templateVO.getHypervisorType() != Hypervisor.HypervisorType.Simulator && templateVO.getParentTemplateId() == null) {
files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore));
}
}
return files;
for (TemplateInfo template: files) {
List<VMTemplateVO> children = templateDao.listByParentTemplatetId(template.getId());
List<TemplateInfo> temps = new ArrayList<>();
temps.add(template);
for(VMTemplateVO child : children) {
temps.add(templateFactory.getTemplate(child.getId(), srcDataStore));
}
childTemplates.put(template, new Pair<>(temps, getTotalChainSize(temps)));
}
return (List<DataObject>) (List<?>) files;
}
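getAllReadyTemplates() now returns only parent templates (getParentTemplateId() == null) and fills childTemplates with each parent, its child templates on the same store, and the combined physical size of that chain. A sketch of how a consumer might read one of those entries; the helper class and method names are illustrative.

import java.util.List;
import java.util.Map;

import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;

import com.cloud.utils.Pair;

public class TemplateChainLookup {
    // Bytes that migrating "parent" would actually move: the parent template plus
    // any child templates recorded for it, falling back to the parent alone.
    public static long bytesToMove(DataObject parent,
            Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates) {
        Pair<List<TemplateInfo>, Long> chain = childTemplates.get(parent);
        return chain != null ? chain.second() : parent.getPhysicalSize();
    }
}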
/** Returns parent snapshots and snapshots that do not have any children; snapshotChains comprises the snapshot chain info
@@ -217,21 +240,20 @@ public class DataMigrationUtility {
chain.addAll(children);
}
}
snapshotChains.put(parent, new Pair<List<SnapshotInfo>, Long>(chain, getSizeForChain(chain)));
snapshotChains.put(parent, new Pair<List<SnapshotInfo>, Long>(chain, getTotalChainSize(chain)));
}
return (List<DataObject>) (List<?>) files;
}
protected Long getSizeForChain(List<SnapshotInfo> chain) {
protected Long getTotalChainSize(List<? extends DataObject> chain) {
Long size = 0L;
for (SnapshotInfo snapshot : chain) {
size += snapshot.getSize();
for (DataObject dataObject : chain) {
size += dataObject.getPhysicalSize();
}
return size;
}
protected List<DataObject> getAllReadyVolumes(DataStore srcDataStore) {
List<DataObject> files = new LinkedList<>();
List<VolumeDataStoreVO> volumes = volumeDataStoreDao.listByStoreId(srcDataStore.getId());

View File

@@ -44,6 +44,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageServic
import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
@@ -144,10 +145,11 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
migrationHelper.checkIfCompleteMigrationPossible(migrationPolicy, srcDataStoreId);
DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image);
Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains = new HashMap<>();
files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains);
Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates = new HashMap<>();
files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains, childTemplates);
if (files.isEmpty()) {
return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy.toString(), true);
return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore.getId()), migrationPolicy.toString(), true);
}
Map<Long, Pair<Long, Long>> storageCapacities = new Hashtable<>();
for (Long storeId : destDatastores) {
@@ -155,7 +157,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
}
storageCapacities.put(srcDataStoreId, new Pair<>(null, null));
if (migrationPolicy == MigrationPolicy.COMPLETE) {
s_logger.debug("Setting source image store "+srcDatastore.getId()+ " to read-only");
s_logger.debug(String.format("Setting source image store: %s to read-only", srcDatastore.getId()));
storageService.updateImageStoreStatus(srcDataStoreId, true);
}
@@ -172,6 +174,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
return response;
}
int skipped = 0;
List<Future<AsyncCallFuture<DataObjectResult>>> futures = new ArrayList<>();
while (true) {
DataObject chosenFileForMigration = null;
@@ -184,7 +187,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
Long destDatastoreId = orderedDS.get(0);
if (chosenFileForMigration == null || destDatastoreId == null || (destDatastoreId == srcDatastore.getId() && migrationPolicy == MigrationPolicy.BALANCE) ) {
Pair<String, Boolean> result = migrateCompleted(destDatastoreId, srcDatastore, files, migrationPolicy);
Pair<String, Boolean> result = migrateCompleted(destDatastoreId, srcDatastore, files, migrationPolicy, skipped);
message = result.first();
success = result.second();
break;
@@ -194,13 +197,14 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
destDatastoreId = orderedDS.get(1);
}
if (chosenFileForMigration.getSize() > storageCapacities.get(destDatastoreId).first()) {
s_logger.debug("file: " + chosenFileForMigration.getId() + " too large to be migrated to " + destDatastoreId);
if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destDatastoreId).first()) {
s_logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId));
skipped += 1;
continue;
}
if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, migrationPolicy)) {
storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, srcDatastore, destDatastoreId, executor, futures);
if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, childTemplates, migrationPolicy)) {
storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, srcDatastore, destDatastoreId, executor, futures);
} else {
if (migrationPolicy == MigrationPolicy.BALANCE) {
continue;
@@ -215,7 +219,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
return handleResponse(futures, migrationPolicy, message, success);
}
protected Pair<String, Boolean> migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List<DataObject> files, MigrationPolicy migrationPolicy) {
protected Pair<String, Boolean> migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List<DataObject> files, MigrationPolicy migrationPolicy, int skipped) {
String message = "";
boolean success = true;
if (destDatastoreId == srcDatastore.getId() && !files.isEmpty()) {
@@ -233,14 +237,27 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
}
} else {
message = "Migration completed";
if (migrationPolicy == MigrationPolicy.COMPLETE && skipped > 0) {
message += ". Not all data objects were migrated. Some were probably skipped due to lack of storage capacity.";
success = false;
}
}
return new Pair<String, Boolean>(message, success);
}
protected Map<Long, Pair<Long, Long>> migrateAway(DataObject chosenFileForMigration, Map<Long, Pair<Long, Long>> storageCapacities,
Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains, DataStore srcDatastore, Long destDatastoreId, ThreadPoolExecutor executor,
List<Future<AsyncCallFuture<DataObjectResult>>> futures) {
Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains);
protected Map<Long, Pair<Long, Long>> migrateAway(
DataObject chosenFileForMigration,
Map<Long, Pair<Long, Long>> storageCapacities,
Map<DataObject,
Pair<List<SnapshotInfo>, Long>> snapshotChains,
Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChains,
DataStore srcDatastore,
Long destDatastoreId,
ThreadPoolExecutor executor,
List<Future<AsyncCallFuture<DataObjectResult>>> futures) {
Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains, templateChains);
storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize);
long activeSsvms = migrationHelper.activeSSVMCount(srcDatastore);
long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM;
@@ -254,8 +271,11 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
if (chosenFileForMigration instanceof SnapshotInfo ) {
task.setSnapshotChains(snapshotChains);
}
if (chosenFileForMigration instanceof TemplateInfo) {
task.setTemplateChain(templateChains);
}
futures.add((executor.submit(task)));
s_logger.debug("Migration of file " + chosenFileForMigration.getId() + " is initiated");
s_logger.debug(String.format("Migration of %s: %s is initiated. ", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()));
return storageCapacities;
}
@@ -374,13 +394,19 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
* @param migrationPolicy determines whether a "Balance" or "Complete" migration operation is to be performed
* @return
*/
private boolean shouldMigrate(DataObject chosenFile, Long srcDatastoreId, Long destDatastoreId, Map<Long, Pair<Long, Long>> storageCapacities,
Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains, MigrationPolicy migrationPolicy) {
private boolean shouldMigrate(
DataObject chosenFile,
Long srcDatastoreId,
Long destDatastoreId,
Map<Long, Pair<Long, Long>> storageCapacities,
Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains,
Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChains,
MigrationPolicy migrationPolicy) {
if (migrationPolicy == MigrationPolicy.BALANCE) {
double meanStdDevCurrent = getStandardDeviation(storageCapacities);
Long fileSize = migrationHelper.getFileSize(chosenFile, snapshotChains);
Long fileSize = migrationHelper.getFileSize(chosenFile, snapshotChains, templateChains);
Map<Long, Pair<Long, Long>> proposedCapacities = assumeMigrate(storageCapacities, srcDatastoreId, destDatastoreId, fileSize);
double meanStdDevAfter = getStandardDeviation(proposedCapacities);
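For the BALANCE policy, the decision compares the capacity spread before and after a hypothetical move of the file (assumeMigrate()). Below is a sketch of that idea under the assumption that getStandardDeviation() works over each store's free-to-total capacity ratio; the actual metric, as well as the class and method names, may differ from the implementation.

import java.util.Collection;

import com.cloud.utils.Pair;

public class BalanceHeuristicSketch {
    // Each Pair is (free bytes, total bytes) for one image store, as in storageCapacities.
    static double stdDevOfFreeRatios(Collection<Pair<Long, Long>> capacities) {
        double mean = capacities.stream()
                .mapToDouble(p -> p.first().doubleValue() / p.second())
                .average().orElse(0);
        double variance = capacities.stream()
                .mapToDouble(p -> Math.pow(p.first().doubleValue() / p.second() - mean, 2))
                .sum() / capacities.size();
        return Math.sqrt(variance);
    }

    // Move the file only if the stores end up more evenly utilised than before.
    static boolean worthMigrating(Collection<Pair<Long, Long>> before, Collection<Pair<Long, Long>> after) {
        return stdDevOfFreeRatios(after) < stdDevOfFreeRatios(before);
    }
}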
@@ -426,6 +452,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
private DataStore srcDataStore;
private DataStore destDataStore;
private Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain;
private Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain;
public MigrateDataTask(DataObject file, DataStore srcDataStore, DataStore destDataStore) {
this.file = file;
this.srcDataStore = srcDataStore;
@@ -439,13 +466,19 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
public Map<DataObject, Pair<List<SnapshotInfo>, Long>> getSnapshotChain() {
return snapshotChain;
}
public Map<DataObject, Pair<List<TemplateInfo>, Long>> getTemplateChain() {
return templateChain;
}
public void setTemplateChain(Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain) {
this.templateChain = templateChain;
}
public DataObject getFile() {
return file;
}
@Override
public AsyncCallFuture<DataObjectResult> call() throws Exception {
return secStgSrv.migrateData(file, srcDataStore, destDataStore, snapshotChain);
return secStgSrv.migrateData(file, srcDataStore, destDataStore, snapshotChain, templateChain);
}
}
}
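Each submitted MigrateDataTask yields an AsyncCallFuture from migrateData(), wrapped in the executor's own Future, so the orchestrator ends up waiting on a future of a future. A sketch of draining those results; handleResponse() is assumed to do something along these lines, and the class and method names here are illustrative.

import java.util.List;
import java.util.concurrent.Future;

import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult;
import org.apache.cloudstack.framework.async.AsyncCallFuture;

public class MigrationResultDrain {
    public static void awaitAll(List<Future<AsyncCallFuture<DataObjectResult>>> futures) throws Exception {
        for (Future<AsyncCallFuture<DataObjectResult>> submitted : futures) {
            AsyncCallFuture<DataObjectResult> copyFuture = submitted.get(); // task was picked up and run
            DataObjectResult result = copyFuture.get();                     // the copy itself finished or failed
            // Per-object success/failure from "result" can then be folded into the
            // MigrationResponse returned to the API caller.
        }
    }
}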

View File

@@ -83,7 +83,8 @@ public class SecondaryStorageServiceImpl implements SecondaryStorageService {
}
@Override
public AsyncCallFuture<DataObjectResult> migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain) {
public AsyncCallFuture<DataObjectResult> migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore,
Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain, Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain) {
AsyncCallFuture<DataObjectResult> future = new AsyncCallFuture<DataObjectResult>();
DataObjectResult res = new DataObjectResult(srcDataObject);
DataObject destDataObject = null;
@@ -114,7 +115,15 @@ public class SecondaryStorageServiceImpl implements SecondaryStorageService {
break;
}
}
} else {
} else if (srcDataObject instanceof TemplateInfo && templateChain != null && templateChain.containsKey(srcDataObject)) {
for (TemplateInfo templateInfo : templateChain.get(srcDataObject).first()) {
destDataObject = destDatastore.create(templateInfo);
templateInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrateDataRequested);
destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrateDataRequested);
migrateJob(future, templateInfo, destDataObject, destDatastore);
}
}
else {
// Check if template in destination store, if yes, do not proceed
if (srcDataObject instanceof TemplateInfo) {
s_logger.debug("Checking if template present at destination");

View File

@@ -141,6 +141,15 @@ public class TemplateObject implements TemplateInfo {
return image.getSize();
}
@Override
public long getPhysicalSize() {
TemplateDataStoreVO templateDataStoreVO = templateStoreDao.findByTemplate(imageVO.getId(), DataStoreRole.Image);
if (templateDataStoreVO != null) {
return templateDataStoreVO.getPhysicalSize();
}
return imageVO.getSize();
}
@Override
public DataObjectType getType() {
return DataObjectType.TEMPLATE;

View File

@@ -231,6 +231,15 @@ public class VolumeObject implements VolumeInfo {
return diskOfferingId == null ? null : diskOfferingDao.findById(diskOfferingId);
}
@Override
public long getPhysicalSize() {
VolumeDataStoreVO volumeDataStoreVO = volumeStoreDao.findByVolume(volumeVO.getId());
if (volumeDataStoreVO != null) {
return volumeDataStoreVO.getPhysicalSize();
}
return volumeVO.getSize();
}
@Override
public Long getBytesReadRate() {
return getLongValueFromDiskOfferingVoMethod(DiskOfferingVO::getBytesReadRate);

View File

@@ -178,19 +178,20 @@ public class DomainChecker extends AdapterBase implements SecurityChecker {
} else {
if (_accountService.isNormalUser(caller.getId())) {
Account account = _accountDao.findById(entity.getAccountId());
String errorMessage = String.format("%s does not have permission to operate with resource", caller);
if (account != null && account.getType() == Account.ACCOUNT_TYPE_PROJECT) {
//only project owner can delete/modify the project
if (accessType != null && accessType == AccessType.ModifyProject) {
if (!_projectMgr.canModifyProjectAccount(caller, account.getId())) {
throw new PermissionDeniedException(caller + " does not have permission to operate with resource " + entity);
throw new PermissionDeniedException(errorMessage);
}
} else if (!_projectMgr.canAccessProjectAccount(caller, account.getId())) {
throw new PermissionDeniedException(caller + " does not have permission to operate with resource " + entity);
throw new PermissionDeniedException(errorMessage);
}
checkOperationPermitted(caller, entity);
} else {
if (caller.getId() != entity.getAccountId()) {
throw new PermissionDeniedException(caller + " does not have permission to operate with resource " + entity);
throw new PermissionDeniedException(errorMessage);
}
}
}

View File

@@ -59,6 +59,11 @@ public class DiagnosticsDataObject implements DataObject {
return null;
}
@Override
public long getPhysicalSize() {
return 0;
}
@Override
public DataObjectType getType() {
return dataTO.getObjectType();

View File

@@ -48,7 +48,7 @@
:autoFocus="customDiskOffering || resource.type === 'ROOT'"/>
</a-form-item>
</div>
<a-form-item :label="$t('label.shrinkok')">
<a-form-item :label="$t('label.shrinkok')" v-if="!['XenServer'].includes(resource.hypervisor)">
<a-checkbox v-decorator="['shrinkok']" />
</a-form-item>
<div :span="24" class="action-button">