Merge branch '4.19'

This commit is contained in:
Daan Hoogland 2024-06-18 19:58:43 +02:00
commit 373f017002
53 changed files with 1011 additions and 445 deletions

View File

@ -29,7 +29,6 @@ public interface VpcVirtualNetworkApplianceService extends VirtualNetworkApplian
/**
* @param router
* @param network
* @param isRedundant
* @param params TODO
* @return
* @throws ConcurrentOperationException
@ -42,11 +41,30 @@ public interface VpcVirtualNetworkApplianceService extends VirtualNetworkApplian
/**
 * Removes the given VPC virtual router from the given guest network.
 *
 * @param router the VPC virtual router to remove
 * @param network the guest network the router is removed from
 * @return true on success — TODO confirm whether failures return false or always throw
 * @throws ConcurrentOperationException
 * @throws ResourceUnavailableException
 */
boolean removeVpcRouterFromGuestNetwork(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException;
/**
 * Stops the keepalived service on the given virtual router for the given
 * network. NOTE(review): semantics inferred from the method name — see the
 * implementing class for the exact behavior.
 *
 * @param router the virtual router on which keepalived is stopped
 * @param network the network the router serves
 * @return true on success
 * @throws ConcurrentOperationException
 * @throws ResourceUnavailableException
 */
boolean stopKeepAlivedOnRouter(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException;
/**
 * Starts the keepalived service on the given virtual router for the given
 * network. NOTE(review): semantics inferred from the method name — see the
 * implementing class for the exact behavior.
 *
 * @param router the virtual router on which keepalived is started
 * @param network the network the router serves
 * @return true on success
 * @throws ConcurrentOperationException
 * @throws ResourceUnavailableException
 */
boolean startKeepAlivedOnRouter(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException;
}

View File

@ -29,6 +29,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
import com.cloud.exception.DiscoveryException;
@ -110,6 +111,8 @@ public interface StorageService {
*/
ImageStore migrateToObjectStore(String name, String url, String providerName, Map<String, String> details) throws DiscoveryException;
ImageStore updateImageStore(UpdateImageStoreCmd cmd);
ImageStore updateImageStoreStatus(Long id, Boolean readonly);
void updateStorageCapabilities(Long poolId, boolean failOnChecks);

View File

@ -39,10 +39,17 @@ public class UpdateImageStoreCmd extends BaseCmd {
@Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID")
private Long id;
@Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store to read-only, " +
"hence not considering them during storage migration")
@Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = false, description = "The new name for the Image Store.")
private String name;
@Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = false,
description = "If set to true, it designates the corresponding image store to read-only, hence not considering them during storage migration")
private Boolean readonly;
@Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false,
description = "The number of bytes CloudStack can use on this image storage.\n\tNOTE: this will be overwritten by the StatsCollector as soon as there is a SSVM to query the storage.")
private Long capacityBytes;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -51,17 +58,25 @@ public class UpdateImageStoreCmd extends BaseCmd {
return id;
}
/**
 * @return the new name requested for the image store, or null when the
 *         optional {@code name} parameter was not supplied
 */
public String getName() {
return name;
}
/**
 * @return true to mark the image store read-only (so it is not considered
 *         during storage migration), false to clear the flag, or null when
 *         the optional parameter was not supplied
 */
public Boolean getReadonly() {
return readonly;
}
/**
 * @return the number of bytes CloudStack can use on this image storage, or
 *         null when the optional parameter was not supplied; per the
 *         parameter description this is overwritten by the StatsCollector
 *         once an SSVM can query the storage
 */
public Long getCapacityBytes() {
return capacityBytes;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public void execute() {
ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly());
ImageStore result = _storageService.updateImageStore(this);
ImageStoreResponse storeResponse = null;
if (result != null) {
storeResponse = _responseGenerator.createImageStoreResponse(result);

View File

@ -27,11 +27,11 @@ import com.google.gson.annotations.SerializedName;
@EntityReference(value = ImageStore.class)
public class ImageStoreResponse extends BaseResponseWithAnnotations {
@SerializedName("id")
@SerializedName(ApiConstants.ID)
@Param(description = "the ID of the image store")
private String id;
@SerializedName("zoneid")
@SerializedName(ApiConstants.ZONE_ID)
@Param(description = "the Zone ID of the image store")
private String zoneId;
@ -39,15 +39,15 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations {
@Param(description = "the Zone name of the image store")
private String zoneName;
@SerializedName("name")
@SerializedName(ApiConstants.NAME)
@Param(description = "the name of the image store")
private String name;
@SerializedName("url")
@SerializedName(ApiConstants.URL)
@Param(description = "the url of the image store")
private String url;
@SerializedName("protocol")
@SerializedName(ApiConstants.PROTOCOL)
@Param(description = "the protocol of the image store")
private String protocol;
@ -55,11 +55,11 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations {
@Param(description = "the provider name of the image store")
private String providerName;
@SerializedName("scope")
@SerializedName(ApiConstants.SCOPE)
@Param(description = "the scope of the image store")
private ScopeType scope;
@SerializedName("readonly")
@SerializedName(ApiConstants.READ_ONLY)
@Param(description = "defines if store is read-only")
private Boolean readonly;

View File

@ -81,4 +81,5 @@ public class VRScripts {
public static final String VR_UPDATE_INTERFACE_CONFIG = "update_interface_config.sh";
public static final String ROUTER_FILESYSTEM_WRITABLE_CHECK = "filesystem_writable_check.py";
public static final String MANAGE_SERVICE = "manage_service.sh";
}

View File

@ -34,6 +34,7 @@ import java.util.concurrent.locks.ReentrantLock;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.agent.routing.ManageServiceCommand;
import com.cloud.agent.api.routing.UpdateNetworkCommand;
import com.cloud.agent.api.to.IpAddressTO;
import com.cloud.network.router.VirtualRouter;
@ -144,6 +145,10 @@ public class VirtualRoutingResource {
return execute((UpdateNetworkCommand) cmd);
}
if (cmd instanceof ManageServiceCommand) {
return execute((ManageServiceCommand) cmd);
}
if (_vrAggregateCommandsSet.containsKey(routerName)) {
_vrAggregateCommandsSet.get(routerName).add(cmd);
aggregated = true;
@ -271,6 +276,20 @@ public class VirtualRoutingResource {
return new Answer(cmd, new CloudRuntimeException("Failed to update interface mtu"));
}
/**
 * Executes a service-management request inside the virtual router by running
 * the manage_service.sh script with the requested action and service name.
 *
 * @param cmd the command carrying the target service and the action to apply
 * @return an Answer whose success flag and message mirror the script result
 */
private Answer execute(ManageServiceCommand cmd) {
    final String routerAccessIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP);
    // Script arguments are "<action> <serviceName>", e.g. "restart dnsmasq".
    final String scriptArgs = cmd.getAction() + " " + cmd.getServiceName();
    final ExecutionResult result = _vrDeployer.executeInVR(routerAccessIp, VRScripts.MANAGE_SERVICE, scriptArgs);
    if (!result.isSuccess()) {
        return new Answer(cmd, false, String.format("Failed to execute action: %s on service: %s. Details: %s",
                cmd.getAction(), cmd.getServiceName(), result.getDetails()));
    }
    return new Answer(cmd, true,
            String.format("Successfully executed action: %s on service: %s. Details: %s",
                    cmd.getAction(), cmd.getServiceName(), result.getDetails()));
}
/**
 * Applies a single configuration item to the virtual router, using the
 * default script execution timeout.
 *
 * @param routerAccessIp IP address used to reach the virtual router
 * @param c the configuration item to apply
 * @return the result of executing the configuration inside the router
 */
private ExecutionResult applyConfigToVR(String routerAccessIp, ConfigItem c) {
return applyConfigToVR(routerAccessIp, c, VRScripts.VR_SCRIPT_EXEC_TIMEOUT);
}

View File

@ -0,0 +1,49 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.agent.routing;
import com.cloud.agent.api.routing.NetworkElementCommand;
/**
 * Agent command asking the virtual routing resource to apply an action
 * (for example start, stop or restart) to a named service running inside
 * a virtual router.
 */
public class ManageServiceCommand extends NetworkElementCommand {

    String serviceName;
    String action;

    // No-arg constructor — presumably required for command deserialization
    // on the agent side; confirm before removing.
    protected ManageServiceCommand() {
    }

    public ManageServiceCommand(String serviceName, String action) {
        this.serviceName = serviceName;
        this.action = action;
    }

    public String getServiceName() {
        return serviceName;
    }

    public String getAction() {
        return action;
    }

    /** This command is executed sequentially, never concurrently with others. */
    @Override
    public boolean executeInSequence() {
        return true;
    }
}

View File

@ -121,4 +121,6 @@ public interface VolumeService {
Pair<String, String> checkAndRepairVolume(VolumeInfo volume);
void checkAndRepairVolumeBasedOnConfig(DataObject dataObject, Host host);
void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId);
}

View File

@ -368,6 +368,8 @@ public interface StorageManager extends StorageService {
Long getDiskIopsWriteRate(ServiceOffering offering, DiskOffering diskOffering);
ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes);
void cleanupDownloadUrls();
void setDiskProfileThrottling(DiskProfile dskCh, ServiceOffering offering, DiskOffering diskOffering);

View File

@ -31,4 +31,6 @@ public interface DomainDetailsDao extends GenericDao<DomainDetailVO, Long> {
void deleteDetails(long domainId);
void update(long domainId, Map<String, String> details);
String getActualValue(DomainDetailVO domainDetailVO);
}

View File

@ -24,6 +24,7 @@ import javax.inject.Inject;
import com.cloud.domain.DomainDetailVO;
import com.cloud.domain.DomainVO;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchBuilder;
@ -34,6 +35,7 @@ import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
public class DomainDetailsDaoImpl extends GenericDaoBase<DomainDetailVO, Long> implements DomainDetailsDao, ScopedConfigStorage {
protected final SearchBuilder<DomainDetailVO> domainSearch;
@ -111,7 +113,7 @@ public class DomainDetailsDaoImpl extends GenericDaoBase<DomainDetailVO, Long> i
String enableDomainSettingsForChildDomain = _configDao.getValue("enable.domain.settings.for.child.domain");
if (!Boolean.parseBoolean(enableDomainSettingsForChildDomain)) {
vo = findDetail(id, key.key());
return vo == null ? null : vo.getValue();
return vo == null ? null : getActualValue(vo);
}
DomainVO domain = _domainDao.findById(id);
// if value is not configured in domain then check its parent domain till ROOT
@ -125,6 +127,15 @@ public class DomainDetailsDaoImpl extends GenericDaoBase<DomainDetailVO, Long> i
break;
}
}
return vo == null ? null : vo.getValue();
return vo == null ? null : getActualValue(vo);
}
/**
 * Returns the usable (plain-text) value of a domain detail: when the
 * matching global configuration entry is marked encrypted, the stored
 * detail value is decrypted before being returned.
 *
 * @param domainDetailVO the stored domain detail row
 * @return the decrypted value for encrypted settings, otherwise the stored value
 */
@Override
public String getActualValue(DomainDetailVO domainDetailVO) {
    final String storedValue = domainDetailVO.getValue();
    final ConfigurationVO config = _configDao.findByName(domainDetailVO.getName());
    final boolean isEncryptedSetting = config != null && config.isEncrypted();
    return isEncryptedSetting ? DBEncryptionUtil.decrypt(storedValue) : storedValue;
}
}

View File

@ -54,7 +54,7 @@ public interface ServiceOfferingDao extends GenericDao<ServiceOfferingVO, Long>
List<ServiceOfferingVO> listPublicByCpuAndMemory(Integer cpus, Integer memory);
ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId);
List<ServiceOfferingVO> listByHostTag(String tag);
ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved);
}

View File

@ -282,10 +282,10 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase<ServiceOfferingVO, Lo
}
@Override
public ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId) {
public ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved) {
SearchCriteria<ServiceOfferingVO> sc = SearchComputeOfferingByComputeOnlyDiskOffering.create();
sc.setParameters("disk_offering_id", diskOfferingId);
List<ServiceOfferingVO> vos = listBy(sc);
List<ServiceOfferingVO> vos = includingRemoved ? listIncludingRemovedBy(sc) : listBy(sc);
if (vos.size() == 0) {
return null;
}

View File

@ -34,4 +34,6 @@ public interface AccountDetailsDao extends GenericDao<AccountDetailVO, Long> {
* they will get created
*/
void update(long accountId, Map<String, String> details);
String getActualValue(AccountDetailVO accountDetailVO);
}

View File

@ -23,6 +23,7 @@ import java.util.Optional;
import javax.inject.Inject;
import com.cloud.utils.crypt.DBEncryptionUtil;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
@ -40,6 +41,7 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
public class AccountDetailsDaoImpl extends GenericDaoBase<AccountDetailVO, Long> implements AccountDetailsDao, ScopedConfigStorage {
protected final SearchBuilder<AccountDetailVO> accountSearch;
@ -119,7 +121,7 @@ public class AccountDetailsDaoImpl extends GenericDaoBase<AccountDetailVO, Long>
public String getConfigValue(long id, ConfigKey<?> key) {
// check if account level setting is configured
AccountDetailVO vo = findDetail(id, key.key());
String value = vo == null ? null : vo.getValue();
String value = vo == null ? null : getActualValue(vo);
if (value != null) {
return value;
}
@ -140,7 +142,7 @@ public class AccountDetailsDaoImpl extends GenericDaoBase<AccountDetailVO, Long>
while (domain != null) {
DomainDetailVO domainVO = _domainDetailsDao.findDetail(domain.getId(), key.key());
if (domainVO != null) {
value = domainVO.getValue();
value = _domainDetailsDao.getActualValue(domainVO);
break;
} else if (domain.getParent() != null) {
domain = _domainDao.findById(domain.getParent());
@ -152,4 +154,13 @@ public class AccountDetailsDaoImpl extends GenericDaoBase<AccountDetailVO, Long>
}
return value;
}
/**
 * Returns the usable (plain-text) value of an account detail: when the
 * matching global configuration entry is marked encrypted, the stored
 * detail value is decrypted before being returned.
 *
 * @param accountDetailVO the stored account detail row
 * @return the decrypted value for encrypted settings, otherwise the stored value
 */
@Override
public String getActualValue(AccountDetailVO accountDetailVO) {
    ConfigurationVO matchingConfig = _configDao.findByName(accountDetailVO.getName());
    if (matchingConfig == null || !matchingConfig.isEncrypted()) {
        return accountDetailVO.getValue();
    }
    return DBEncryptionUtil.decrypt(accountDetailVO.getValue());
}
}

View File

@ -32,7 +32,6 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import com.cloud.storage.VolumeApiServiceImpl;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd;
@ -105,6 +104,7 @@ import com.cloud.configuration.Resource.ResourceType;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.StorageAccessException;
import com.cloud.host.Host;
@ -118,6 +118,7 @@ import com.cloud.org.Grouping.AllocationState;
import com.cloud.resource.ResourceState;
import com.cloud.server.ManagementService;
import com.cloud.storage.CheckAndRepairVolumePayload;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.RegisterVolumePayload;
import com.cloud.storage.ScopeType;
@ -130,6 +131,7 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeApiServiceImpl;
import com.cloud.storage.Volume.State;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
@ -215,7 +217,7 @@ public class VolumeServiceImpl implements VolumeService {
@Inject
private PassphraseDao passphraseDao;
@Inject
private DiskOfferingDao diskOfferingDao;
protected DiskOfferingDao diskOfferingDao;
public VolumeServiceImpl() {
}
@ -290,12 +292,12 @@ public class VolumeServiceImpl implements VolumeService {
@Override
public AsyncCallFuture<VolumeApiResult> createVolumeAsync(VolumeInfo volume, DataStore dataStore) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
DataObject volumeOnStore = dataStore.create(volume);
volumeOnStore.processEvent(Event.CreateOnlyRequested);
try {
CreateVolumeContext<VolumeApiResult> context = new CreateVolumeContext<VolumeApiResult>(null, volumeOnStore, future);
CreateVolumeContext<VolumeApiResult> context = new CreateVolumeContext<>(null, volumeOnStore, future);
AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().createVolumeCallback(null, null)).setContext(context);
@ -371,7 +373,7 @@ public class VolumeServiceImpl implements VolumeService {
@DB
@Override
public AsyncCallFuture<VolumeApiResult> expungeVolumeAsync(VolumeInfo volume) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
VolumeApiResult result = new VolumeApiResult(volume);
if (volume.getDataStore() == null) {
logger.info("Expunge volume with no data store specified");
@ -427,7 +429,7 @@ public class VolumeServiceImpl implements VolumeService {
volume.processEvent(Event.ExpungeRequested);
}
DeleteVolumeContext<VolumeApiResult> context = new DeleteVolumeContext<VolumeApiResult>(null, vo, future);
DeleteVolumeContext<VolumeApiResult> context = new DeleteVolumeContext<>(null, vo, future);
AsyncCallbackDispatcher<VolumeServiceImpl, CommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)).setContext(context);
@ -636,7 +638,7 @@ public class VolumeServiceImpl implements VolumeService {
}
}
long templatePoolRefId = templatePoolRef.getId();
CreateBaseImageContext<CreateCmdResult> context = new CreateBaseImageContext<CreateCmdResult>(null, volume, dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId);
CreateBaseImageContext<CreateCmdResult> context = new CreateBaseImageContext<>(null, volume, dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().copyBaseImageCallback(null, null)).setContext(context);
@ -806,7 +808,7 @@ public class VolumeServiceImpl implements VolumeService {
DataObject volumeOnPrimaryStorage = pd.create(volume, volume.getDeployAsIsConfiguration());
volumeOnPrimaryStorage.processEvent(Event.CreateOnlyRequested);
CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<VolumeApiResult>(null, volumeOnPrimaryStorage, pd, templateOnPrimaryStore, future, null, volume.getDeployAsIsConfiguration());
CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<>(null, volumeOnPrimaryStorage, pd, templateOnPrimaryStore, future, null, volume.getDeployAsIsConfiguration());
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallBack(null, null));
caller.setContext(context);
@ -1174,7 +1176,7 @@ public class VolumeServiceImpl implements VolumeService {
// Refresh the volume info from the DB.
volumeInfo = volFactory.getVolume(volumeInfo.getId(), destPrimaryDataStore);
Map<String, String> details = new HashMap<String, String>();
Map<String, String> details = new HashMap<>();
details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress());
details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort()));
@ -1278,12 +1280,12 @@ public class VolumeServiceImpl implements VolumeService {
// Refresh the volume info from the DB.
volumeInfo = volFactory.getVolume(volumeInfo.getId(), primaryDataStore);
ManagedCreateBaseImageContext<CreateCmdResult> context = new ManagedCreateBaseImageContext<CreateCmdResult>(null, volumeInfo, primaryDataStore, srcTemplateInfo, future);
ManagedCreateBaseImageContext<CreateCmdResult> context = new ManagedCreateBaseImageContext<>(null, volumeInfo, primaryDataStore, srcTemplateInfo, future);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().managedCopyBaseImageCallback(null, null)).setContext(context);
Map<String, String> details = new HashMap<String, String>();
Map<String, String> details = new HashMap<>();
details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
details.put(PrimaryDataStore.STORAGE_HOST, primaryDataStore.getHostAddress());
@ -1639,14 +1641,14 @@ public class VolumeServiceImpl implements VolumeService {
@Override
public AsyncCallFuture<VolumeApiResult> createVolumeFromSnapshot(VolumeInfo volume, DataStore store, SnapshotInfo snapshot) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
try {
DataObject volumeOnStore = store.create(volume);
volumeOnStore.processEvent(Event.CreateOnlyRequested);
_volumeDetailsDao.addDetail(volume.getId(), SNAPSHOT_ID, Long.toString(snapshot.getId()), false);
CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<VolumeApiResult>(null, volume, store, volumeOnStore, future, snapshot, null);
CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<>(null, volume, store, volumeOnStore, future, snapshot, null);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().createVolumeFromSnapshotCallback(null, null)).setContext(context);
motionSrv.copyAsync(snapshot, volumeOnStore, caller);
@ -1733,7 +1735,7 @@ public class VolumeServiceImpl implements VolumeService {
}
protected AsyncCallFuture<VolumeApiResult> copyVolumeFromImageToPrimary(VolumeInfo srcVolume, DataStore destStore) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
VolumeApiResult res = new VolumeApiResult(srcVolume);
VolumeInfo destVolume = null;
try {
@ -1741,7 +1743,7 @@ public class VolumeServiceImpl implements VolumeService {
destVolume.processEvent(Event.CopyingRequested);
srcVolume.processEvent(Event.CopyingRequested);
CopyVolumeContext<VolumeApiResult> context = new CopyVolumeContext<VolumeApiResult>(null, future, srcVolume, destVolume, destStore);
CopyVolumeContext<VolumeApiResult> context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().copyVolumeFromImageToPrimaryCallback(null, null)).setContext(context);
@ -1787,7 +1789,7 @@ public class VolumeServiceImpl implements VolumeService {
}
protected AsyncCallFuture<VolumeApiResult> copyVolumeFromPrimaryToImage(VolumeInfo srcVolume, DataStore destStore) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
VolumeApiResult res = new VolumeApiResult(srcVolume);
VolumeInfo destVolume = null;
try {
@ -1795,7 +1797,7 @@ public class VolumeServiceImpl implements VolumeService {
srcVolume.processEvent(Event.MigrationRequested); // this is just used for locking that src volume record in DB to avoid using lock
destVolume.processEventOnly(Event.CreateOnlyRequested);
CopyVolumeContext<VolumeApiResult> context = new CopyVolumeContext<VolumeApiResult>(null, future, srcVolume, destVolume, destStore);
CopyVolumeContext<VolumeApiResult> context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().copyVolumeFromPrimaryToImageCallback(null, null)).setContext(context);
@ -1868,7 +1870,7 @@ public class VolumeServiceImpl implements VolumeService {
// OfflineVmwareMigration: aren't we missing secondary to secondary in this logic?
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
VolumeApiResult res = new VolumeApiResult(srcVolume);
try {
if (!snapshotMgr.canOperateOnVolume(srcVolume)) {
@ -1884,7 +1886,7 @@ public class VolumeServiceImpl implements VolumeService {
destVolume.processEvent(Event.MigrationCopyRequested);
srcVolume.processEvent(Event.MigrationRequested);
CopyVolumeContext<VolumeApiResult> context = new CopyVolumeContext<VolumeApiResult>(null, future, srcVolume, destVolume, destStore);
CopyVolumeContext<VolumeApiResult> context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)).setContext(context);
motionSrv.copyAsync(srcVolume, destVolume, caller);
@ -2018,7 +2020,7 @@ public class VolumeServiceImpl implements VolumeService {
}
private AsyncCallFuture<VolumeApiResult> copyManagedVolume(VolumeInfo srcVolume, DataStore destStore) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
VolumeApiResult res = new VolumeApiResult(srcVolume);
try {
if (!snapshotMgr.canOperateOnVolume(srcVolume)) {
@ -2035,7 +2037,7 @@ public class VolumeServiceImpl implements VolumeService {
return future;
}
List<Long> poolIds = new ArrayList<Long>();
List<Long> poolIds = new ArrayList<>();
poolIds.add(srcVolume.getPoolId());
poolIds.add(destStore.getId());
@ -2067,7 +2069,7 @@ public class VolumeServiceImpl implements VolumeService {
PrimaryDataStore srcPrimaryDataStore = (PrimaryDataStore) srcVolume.getDataStore();
if (srcPrimaryDataStore.isManaged()) {
Map<String, String> srcPrimaryDataStoreDetails = new HashMap<String, String>();
Map<String, String> srcPrimaryDataStoreDetails = new HashMap<>();
srcPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
srcPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_HOST, srcPrimaryDataStore.getHostAddress());
srcPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(srcPrimaryDataStore.getPort()));
@ -2080,7 +2082,7 @@ public class VolumeServiceImpl implements VolumeService {
}
PrimaryDataStore destPrimaryDataStore = (PrimaryDataStore) destStore;
Map<String, String> destPrimaryDataStoreDetails = new HashMap<String, String>();
Map<String, String> destPrimaryDataStoreDetails = new HashMap<>();
destPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
destPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress());
destPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort()));
@ -2095,7 +2097,7 @@ public class VolumeServiceImpl implements VolumeService {
destVolume.processEvent(Event.CreateRequested);
srcVolume.processEvent(Event.MigrationRequested);
CopyManagedVolumeContext<VolumeApiResult> context = new CopyManagedVolumeContext<VolumeApiResult>(null, future, srcVolume, destVolume, hostWithPoolsAccess);
CopyManagedVolumeContext<VolumeApiResult> context = new CopyManagedVolumeContext<>(null, future, srcVolume, destVolume, hostWithPoolsAccess);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().copyManagedVolumeCallBack(null, null)).setContext(context);
@ -2233,7 +2235,7 @@ public class VolumeServiceImpl implements VolumeService {
@Override
public AsyncCallFuture<VolumeApiResult> migrateVolume(VolumeInfo srcVolume, DataStore destStore) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
VolumeApiResult res = new VolumeApiResult(srcVolume);
try {
if (!snapshotMgr.canOperateOnVolume(srcVolume)) {
@ -2245,7 +2247,7 @@ public class VolumeServiceImpl implements VolumeService {
VolumeInfo destVolume = volFactory.getVolume(srcVolume.getId(), destStore);
srcVolume.processEvent(Event.MigrationRequested);
MigrateVolumeContext<VolumeApiResult> context = new MigrateVolumeContext<VolumeApiResult>(null, future, srcVolume, destVolume, destStore);
MigrateVolumeContext<VolumeApiResult> context = new MigrateVolumeContext<>(null, future, srcVolume, destVolume, destStore);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().migrateVolumeCallBack(null, null)).setContext(context);
motionSrv.copyAsync(srcVolume, destVolume, caller);
@ -2298,13 +2300,13 @@ public class VolumeServiceImpl implements VolumeService {
@Override
public AsyncCallFuture<CommandResult> migrateVolumes(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost) {
AsyncCallFuture<CommandResult> future = new AsyncCallFuture<CommandResult>();
AsyncCallFuture<CommandResult> future = new AsyncCallFuture<>();
CommandResult res = new CommandResult();
try {
// Check to make sure there are no snapshot operations on a volume
// and
// put it in the migrating state.
List<VolumeInfo> volumesMigrating = new ArrayList<VolumeInfo>();
List<VolumeInfo> volumesMigrating = new ArrayList<>();
for (Map.Entry<VolumeInfo, DataStore> entry : volumeMap.entrySet()) {
VolumeInfo volume = entry.getKey();
if (!snapshotMgr.canOperateOnVolume(volume)) {
@ -2324,7 +2326,7 @@ public class VolumeServiceImpl implements VolumeService {
}
}
MigrateVmWithVolumesContext<CommandResult> context = new MigrateVmWithVolumesContext<CommandResult>(null, future, volumeMap);
MigrateVmWithVolumesContext<CommandResult> context = new MigrateVmWithVolumesContext<>(null, future, volumeMap);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().migrateVmWithVolumesCallBack(null, null)).setContext(context);
motionSrv.copyAsync(volumeMap, vmTo, srcHost, destHost, caller);
@ -2371,13 +2373,13 @@ public class VolumeServiceImpl implements VolumeService {
@Override
public AsyncCallFuture<VolumeApiResult> registerVolume(VolumeInfo volume, DataStore store) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
DataObject volumeOnStore = store.create(volume);
volumeOnStore.processEvent(Event.CreateOnlyRequested);
try {
CreateVolumeContext<VolumeApiResult> context = new CreateVolumeContext<VolumeApiResult>(null, volumeOnStore, future);
CreateVolumeContext<VolumeApiResult> context = new CreateVolumeContext<>(null, volumeOnStore, future);
AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().registerVolumeCallback(null, null));
caller.setContext(context);
@ -2472,7 +2474,7 @@ public class VolumeServiceImpl implements VolumeService {
@Override
public AsyncCallFuture<VolumeApiResult> resize(VolumeInfo volume) {
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
VolumeApiResult result = new VolumeApiResult(volume);
try {
volume.processEvent(Event.ResizeRequested);
@ -2482,7 +2484,7 @@ public class VolumeServiceImpl implements VolumeService {
future.complete(result);
return future;
}
CreateVolumeContext<VolumeApiResult> context = new CreateVolumeContext<VolumeApiResult>(null, volume, future);
CreateVolumeContext<VolumeApiResult> context = new CreateVolumeContext<>(null, volume, future);
AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().resizeVolumeCallback(caller, context)).setContext(context);
@ -2581,7 +2583,7 @@ public class VolumeServiceImpl implements VolumeService {
// find all the db volumes including those with NULL url column to avoid accidentally deleting volumes on image store later.
List<VolumeDataStoreVO> dbVolumes = _volumeStoreDao.listByStoreId(storeId);
List<VolumeDataStoreVO> toBeDownloaded = new ArrayList<VolumeDataStoreVO>(dbVolumes);
List<VolumeDataStoreVO> toBeDownloaded = new ArrayList<>(dbVolumes);
for (VolumeDataStoreVO volumeStore : dbVolumes) {
VolumeVO volume = volDao.findById(volumeStore.getVolumeId());
if (volume == null) {
@ -2797,6 +2799,16 @@ public class VolumeServiceImpl implements VolumeService {
}
}
@Override
public void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId) {
    // The current offering may already have been soft-deleted, so include removed rows;
    // the target offering must still be an active (non-removed) one.
    DiskOfferingVO existingDiskOffering = diskOfferingDao.findByIdIncludingRemoved(existingDiskOfferingId);
    DiskOfferingVO newDiskOffering = diskOfferingDao.findById(newDiskOfferingId);
    if (existingDiskOffering == null) {
        throw new InvalidParameterValueException(String.format("Unable to find the current disk offering with id [%s].", existingDiskOfferingId));
    }
    if (newDiskOffering == null) {
        throw new InvalidParameterValueException(String.format("Unable to find the new disk offering with id [%s].", newDiskOfferingId));
    }
    // A volume cannot be converted between encrypted and unencrypted offerings.
    if (existingDiskOffering.getEncrypt() != newDiskOffering.getEncrypt()) {
        throw new InvalidParameterValueException("Cannot change the encryption type of a volume, please check the selected offering");
    }
}
@Override
public Pair<String, String> checkAndRepairVolume(VolumeInfo volume) {
Long poolId = volume.getPoolId();

View File

@ -19,6 +19,23 @@
package org.apache.cloudstack.storage.volume;
import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer;
import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.CheckAndRepairVolumePayload;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.utils.Pair;
import java.util.ArrayList;
import java.util.Arrays;
@ -39,21 +56,6 @@ import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer;
import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.CheckAndRepairVolumePayload;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.utils.Pair;
import junit.framework.TestCase;
@RunWith(MockitoJUnitRunner.class)
@ -92,6 +94,9 @@ public class VolumeServiceTest extends TestCase{
@Mock
HostDao hostDaoMock;
@Mock
DiskOfferingDao diskOfferingDaoMock;
@Before
public void setup(){
volumeServiceImplSpy = Mockito.spy(new VolumeServiceImpl());
@ -100,6 +105,7 @@ public class VolumeServiceTest extends TestCase{
volumeServiceImplSpy.snapshotMgr = snapshotManagerMock;
volumeServiceImplSpy._storageMgr = storageManagerMock;
volumeServiceImplSpy._hostDao = hostDaoMock;
volumeServiceImplSpy.diskOfferingDao = diskOfferingDaoMock;
}
@Test(expected = InterruptedException.class)
@ -309,4 +315,40 @@ public class VolumeServiceTest extends TestCase{
Assert.assertEquals(null, result);
}
@Test
public void validateDiskOfferingCheckForEncryption1Test() {
    // Both offerings encrypted: the change is allowed, no exception expected.
    prepareOfferingsForEncryptionValidation(1L, true);
    prepareOfferingsForEncryptionValidation(2L, true);
    volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L);
}
@Test
public void validateDiskOfferingCheckForEncryption2Test() {
    // Both offerings unencrypted: the change is allowed, no exception expected.
    prepareOfferingsForEncryptionValidation(1L, false);
    prepareOfferingsForEncryptionValidation(2L, false);
    volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L);
}
@Test (expected = InvalidParameterValueException.class)
public void validateDiskOfferingCheckForEncryptionFail1Test() {
    // Unencrypted -> encrypted: encryption type would change, so the validation must throw.
    prepareOfferingsForEncryptionValidation(1L, false);
    prepareOfferingsForEncryptionValidation(2L, true);
    volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L);
}
@Test (expected = InvalidParameterValueException.class)
public void validateDiskOfferingCheckForEncryptionFail2Test() {
    // Encrypted -> unencrypted: encryption type would change, so the validation must throw.
    prepareOfferingsForEncryptionValidation(1L, true);
    prepareOfferingsForEncryptionValidation(2L, false);
    volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L);
}
// Stubs the disk offering DAO so that both lookup paths (including-removed and plain)
// return a mock offering with the requested encryption flag for the given id.
private void prepareOfferingsForEncryptionValidation(long diskOfferingId, boolean encryption) {
    DiskOfferingVO offeringMock = Mockito.mock(DiskOfferingVO.class);
    Mockito.doReturn(encryption).when(offeringMock).getEncrypt();
    Mockito.doReturn(offeringMock).when(diskOfferingDaoMock).findByIdIncludingRemoved(diskOfferingId);
    Mockito.doReturn(offeringMock).when(diskOfferingDaoMock).findById(diskOfferingId);
}
}

View File

@ -22,6 +22,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
@ -54,6 +55,7 @@ import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageVO;
@ -145,79 +147,81 @@ public class QuotaManagerImpl extends ManagerBase implements QuotaManager {
return;
}
QuotaUsageVO firstQuotaUsage = accountQuotaUsages.get(0);
Date startDate = firstQuotaUsage.getStartDate();
Date endDate = firstQuotaUsage.getStartDate();
Date startDate = accountQuotaUsages.get(0).getStartDate();
Date endDate = accountQuotaUsages.get(0).getEndDate();
Date lastQuotaUsageEndDate = accountQuotaUsages.get(accountQuotaUsages.size() - 1).getEndDate();
logger.info("Processing quota balance for account [{}] between [{}] and [{}].", accountToString,
DateUtil.displayDateInTimezone(usageAggregationTimeZone, startDate),
DateUtil.displayDateInTimezone(usageAggregationTimeZone, accountQuotaUsages.get(accountQuotaUsages.size() - 1).getEndDate()));
LinkedHashSet<Pair<Date, Date>> periods = accountQuotaUsages.stream()
.map(quotaUsageVO -> new Pair<>(quotaUsageVO.getStartDate(), quotaUsageVO.getEndDate()))
.collect(Collectors.toCollection(LinkedHashSet::new));
logger.info(String.format("Processing quota balance for account[{}] between [{}] and [{}].", accountToString, startDate, lastQuotaUsageEndDate));
BigDecimal aggregatedUsage = BigDecimal.ZERO;
long accountId = accountVo.getAccountId();
long domainId = accountVo.getDomainId();
BigDecimal accountBalance = retrieveBalanceForUsageCalculation(accountId, domainId, startDate, accountToString);
aggregatedUsage = getUsageValueAccordingToLastQuotaUsageEntryAndLastQuotaBalance(accountId, domainId, startDate, endDate, aggregatedUsage, accountToString);
for (Pair<Date, Date> period : periods) {
startDate = period.first();
endDate = period.second();
for (QuotaUsageVO quotaUsage : accountQuotaUsages) {
Date quotaUsageStartDate = quotaUsage.getStartDate();
Date quotaUsageEndDate = quotaUsage.getEndDate();
BigDecimal quotaUsed = quotaUsage.getQuotaUsed();
if (quotaUsed.equals(BigDecimal.ZERO)) {
aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, quotaUsageStartDate, quotaUsageEndDate, accountToString));
continue;
}
if (startDate.compareTo(quotaUsageStartDate) == 0) {
aggregatedUsage = aggregatedUsage.subtract(quotaUsed);
continue;
}
_quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, aggregatedUsage, endDate));
aggregatedUsage = BigDecimal.ZERO;
startDate = quotaUsageStartDate;
endDate = quotaUsageEndDate;
QuotaBalanceVO lastRealBalanceEntry = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, endDate);
Date lastBalanceDate = new Date(0);
if (lastRealBalanceEntry != null) {
lastBalanceDate = lastRealBalanceEntry.getUpdatedOn();
aggregatedUsage = aggregatedUsage.add(lastRealBalanceEntry.getCreditBalance());
}
aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, lastBalanceDate, endDate, accountToString));
aggregatedUsage = aggregatedUsage.subtract(quotaUsed);
accountBalance = calculateBalanceConsideringCreditsAddedAndQuotaUsed(accountBalance, accountQuotaUsages, accountId, domainId, startDate, endDate, accountToString);
_quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, accountBalance, endDate));
}
_quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, aggregatedUsage, endDate));
saveQuotaAccount(accountId, aggregatedUsage, endDate);
saveQuotaAccount(accountId, accountBalance, endDate);
}
protected BigDecimal getUsageValueAccordingToLastQuotaUsageEntryAndLastQuotaBalance(long accountId, long domainId, Date startDate, Date endDate, BigDecimal aggregatedUsage,
String accountToString) {
/**
 * Computes the account balance for the informed period:
 * <ol>
 *     <li>credits added within the period are added to the balance;</li>
 *     <li>the quota consumed by every usage entry starting exactly at the period start is subtracted.</li>
 * </ol>
 *
 * @return the resulting balance after applying credits and subtracting the quota used in the period
 */
protected BigDecimal calculateBalanceConsideringCreditsAddedAndQuotaUsed(BigDecimal accountBalance, List<QuotaUsageVO> accountQuotaUsages, long accountId, long domainId,
        Date startDate, Date endDate, String accountToString) {
    BigDecimal credits = aggregateCreditBetweenDates(accountId, domainId, startDate, endDate, accountToString);
    BigDecimal balance = accountBalance.add(credits);
    for (QuotaUsageVO usageEntry : accountQuotaUsages) {
        // Only entries belonging to this exact period (same start instant) are charged here.
        if (!DateUtils.isSameInstant(usageEntry.getStartDate(), startDate)) {
            continue;
        }
        balance = balance.subtract(usageEntry.getQuotaUsed());
    }
    return balance;
}
/**
* Retrieves the initial balance prior to the period of the quota processing.
* <ul>
* <li>
* If it is the first time of processing for the account, the credits prior to the quota processing are added, and the first balance is persisted in the DB.
* </li>
* <li>
* Otherwise, the last real balance of the account is retrieved.
* </li>
* </ul>
*/
protected BigDecimal retrieveBalanceForUsageCalculation(long accountId, long domainId, Date startDate, String accountToString) {
BigDecimal accountBalance = BigDecimal.ZERO;
QuotaUsageVO lastQuotaUsage = _quotaUsageDao.findLastQuotaUsageEntry(accountId, domainId, startDate);
if (lastQuotaUsage == null) {
aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, new Date(0), startDate, accountToString));
QuotaBalanceVO firstBalance = new QuotaBalanceVO(accountId, domainId, aggregatedUsage, startDate);
accountBalance = accountBalance.add(aggregateCreditBetweenDates(accountId, domainId, new Date(0), startDate, accountToString));
QuotaBalanceVO firstBalance = new QuotaBalanceVO(accountId, domainId, accountBalance, startDate);
logger.debug(String.format("Persisting the first quota balance [%s] for account [%s].", firstBalance, accountToString));
_quotaBalanceDao.saveQuotaBalance(firstBalance);
} else {
QuotaBalanceVO lastRealBalance = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, endDate);
QuotaBalanceVO lastRealBalance = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, startDate);
if (lastRealBalance != null) {
aggregatedUsage = aggregatedUsage.add(lastRealBalance.getCreditBalance());
aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, lastRealBalance.getUpdatedOn(), endDate, accountToString));
if (lastRealBalance == null) {
logger.warn("Account [{}] has quota usage entries, however it does not have a quota balance.", accountToString);
} else {
logger.warn(String.format("Account [%s] has quota usage entries, however it does not have a quota balance.", accountToString));
accountBalance = accountBalance.add(lastRealBalance.getCreditBalance());
}
}
return aggregatedUsage;
return accountBalance;
}
protected void saveQuotaAccount(long accountId, BigDecimal aggregatedUsage, Date endDate) {

View File

@ -43,7 +43,6 @@ import com.cloud.resource.ResourceManager;
import com.cloud.storage.VMTemplateVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.AdapterBase;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@ -89,7 +88,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator {
Long clusterId = plan.getClusterId();
ServiceOffering offering = vmProfile.getServiceOffering();
List<? extends Host> hostsCopy = null;
List<Host> suitableHosts = new ArrayList<Host>();
List<Host> suitableHosts = new ArrayList<>();
if (type == Host.Type.Storage) {
return suitableHosts;
@ -107,7 +106,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator {
}
if (hosts != null) {
// retain all computing hosts, regardless of whether they support routing...it's random after all
hostsCopy = new ArrayList<Host>(hosts);
hostsCopy = new ArrayList<>(hosts);
if (ObjectUtils.anyNotNull(offeringHostTag, templateTag)) {
hostsCopy.retainAll(listHostsByTags(type, dcId, podId, clusterId, offeringHostTag, templateTag));
} else {
@ -124,14 +123,15 @@ public class RandomAllocator extends AdapterBase implements HostAllocator {
hostsCopy = ListUtils.union(hostsCopy, _hostDao.findHostsWithTagRuleThatMatchComputeOferringTags(offeringHostTag));
if (hostsCopy.isEmpty()) {
logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTag));
throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile));
logger.info("No suitable host found for VM [{}] in {}.", vmProfile, hostTag);
return null;
}
logger.debug("Random Allocator found " + hostsCopy.size() + " hosts");
if (hostsCopy.size() == 0) {
logger.debug("Random Allocator found {} hosts", hostsCopy.size());
if (hostsCopy.isEmpty()) {
return suitableHosts;
}
Collections.shuffle(hostsCopy);
for (Host host : hostsCopy) {
if (suitableHosts.size() == returnUpTo) {
@ -174,7 +174,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator {
if (logger.isDebugEnabled()) {
logger.debug("Random Allocator found 0 hosts as given host list is empty");
}
return new ArrayList<Host>();
return new ArrayList<>();
}
return findSuitableHosts(vmProfile, plan, type, avoid, hosts, returnUpTo, considerReservedCapacity);
}

View File

@ -782,7 +782,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
*/
protected EnumMap<VmwareStorageProcessorConfigurableFields, Object> examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd,
EnumMap<VmwareStorageProcessorConfigurableFields, Object> params) {
EnumMap<VmwareStorageProcessorConfigurableFields, Object> paramsCopy = new EnumMap<VmwareStorageProcessorConfigurableFields, Object>(params);
EnumMap<VmwareStorageProcessorConfigurableFields, Object> paramsCopy = new EnumMap<>(params);
HypervisorType hypervisor = cmd.getDestTO().getHypervisorType();
if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)) {
DataStoreTO destDataStore = cmd.getDestTO().getDataStore();
@ -2201,7 +2201,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
throw new Exception("Failed to find the newly create or relocated VM. vmName: " + vmInternalCSName);
}
}
if (deployAsIs) {
if (deployAsIs && !vmMo.hasSnapshot()) {
logger.info("Mapping VM disks to spec disks and tearing down datadisks (if any)");
mapSpecDisksToClonedDisksAndTearDownDatadisks(vmMo, vmInternalCSName, specDisks);
}

View File

@ -136,9 +136,9 @@ public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthMana
public OauthProviderVO registerOauthProvider(RegisterOAuthProviderCmd cmd) {
String description = cmd.getDescription();
String provider = cmd.getProvider();
String clientId = cmd.getClientId();
String redirectUri = cmd.getRedirectUri();
String secretKey = cmd.getSecretKey();
String clientId = StringUtils.trim(cmd.getClientId());
String redirectUri = StringUtils.trim(cmd.getRedirectUri());
String secretKey = StringUtils.trim(cmd.getSecretKey());
if (!isOAuthPluginEnabled()) {
throw new CloudRuntimeException("OAuth is not enabled, please enable to register");
@ -168,9 +168,9 @@ public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthMana
public OauthProviderVO updateOauthProvider(UpdateOAuthProviderCmd cmd) {
Long id = cmd.getId();
String description = cmd.getDescription();
String clientId = cmd.getClientId();
String redirectUri = cmd.getRedirectUri();
String secretKey = cmd.getSecretKey();
String clientId = StringUtils.trim(cmd.getClientId());
String redirectUri = StringUtils.trim(cmd.getRedirectUri());
String secretKey = StringUtils.trim(cmd.getSecretKey());
Boolean enabled = cmd.getEnabled();
OauthProviderVO providerVO = _oauthProviderDao.findById(id);

View File

@ -128,12 +128,10 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
if (type == Host.Type.Storage) {
// FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not
return new ArrayList<Host>();
return new ArrayList<>();
}
if (logger.isDebugEnabled()) {
logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
}
logger.debug("Looking for hosts in zone [{}], pod [{}], cluster [{}]", dcId, podId, clusterId);
String hostTagOnOffering = offering.getHostTag();
String hostTagOnTemplate = template.getTemplateTag();
@ -142,8 +140,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
boolean hasSvcOfferingTag = hostTagOnOffering != null ? true : false;
boolean hasTemplateTag = hostTagOnTemplate != null ? true : false;
List<HostVO> clusterHosts = new ArrayList<HostVO>();
List<HostVO> hostsMatchingUefiTag = new ArrayList<HostVO>();
List<HostVO> clusterHosts = new ArrayList<>();
List<HostVO> hostsMatchingUefiTag = new ArrayList<>();
if(isVMDeployedWithUefi){
hostsMatchingUefiTag = _hostDao.listByHostCapability(type, clusterId, podId, dcId, Host.HOST_UEFI_ENABLE);
if (logger.isDebugEnabled()) {
@ -159,8 +157,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
if (hostTagOnOffering == null && hostTagOnTemplate == null) {
clusterHosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId);
} else {
List<HostVO> hostsMatchingOfferingTag = new ArrayList<HostVO>();
List<HostVO> hostsMatchingTemplateTag = new ArrayList<HostVO>();
List<HostVO> hostsMatchingOfferingTag = new ArrayList<>();
List<HostVO> hostsMatchingTemplateTag = new ArrayList<>();
if (hasSvcOfferingTag) {
if (logger.isDebugEnabled()) {
logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering);
@ -205,7 +203,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
if (clusterHosts.isEmpty()) {
logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTagOnOffering));
logger.error("No suitable host found for vm [{}] with tags [{}].", vmProfile, hostTagOnOffering);
throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile));
}
// add all hosts that we are not considering to the avoid list
@ -231,8 +229,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
ServiceOffering offering = vmProfile.getServiceOffering();
VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
Account account = vmProfile.getOwner();
List<Host> suitableHosts = new ArrayList<Host>();
List<Host> hostsCopy = new ArrayList<Host>(hosts);
List<Host> suitableHosts = new ArrayList<>();
List<Host> hostsCopy = new ArrayList<>(hosts);
if (type == Host.Type.Storage) {
// FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of
@ -314,7 +312,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
}
long serviceOfferingId = offering.getId();
List<Host> suitableHosts = new ArrayList<Host>();
List<Host> suitableHosts = new ArrayList<>();
ServiceOfferingDetailsVO offeringDetails = null;
for (Host host : hosts) {
@ -383,15 +381,15 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
}
//now filter the given list of Hosts by this ordered list
Map<Long, Host> hostMap = new HashMap<Long, Host>();
Map<Long, Host> hostMap = new HashMap<>();
for (Host host : hosts) {
hostMap.put(host.getId(), host);
}
List<Long> matchingHostIds = new ArrayList<Long>(hostMap.keySet());
List<Long> matchingHostIds = new ArrayList<>(hostMap.keySet());
hostIdsByFreeCapacity.retainAll(matchingHostIds);
List<Host> reorderedHosts = new ArrayList<Host>();
List<Host> reorderedHosts = new ArrayList<>();
for(Long id: hostIdsByFreeCapacity){
reorderedHosts.add(hostMap.get(id));
}
@ -413,15 +411,15 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
}
//now filter the given list of Hosts by this ordered list
Map<Long, Host> hostMap = new HashMap<Long, Host>();
Map<Long, Host> hostMap = new HashMap<>();
for (Host host : hosts) {
hostMap.put(host.getId(), host);
}
List<Long> matchingHostIds = new ArrayList<Long>(hostMap.keySet());
List<Long> matchingHostIds = new ArrayList<>(hostMap.keySet());
hostIdsByVmCount.retainAll(matchingHostIds);
List<Host> reorderedHosts = new ArrayList<Host>();
List<Host> reorderedHosts = new ArrayList<>();
for (Long id : hostIdsByVmCount) {
reorderedHosts.add(hostMap.get(id));
}
@ -444,11 +442,11 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
// Determine the guest OS category of the template
String templateGuestOSCategory = getTemplateGuestOSCategory(template);
List<Host> prioritizedHosts = new ArrayList<Host>();
List<Host> noHvmHosts = new ArrayList<Host>();
List<Host> prioritizedHosts = new ArrayList<>();
List<Host> noHvmHosts = new ArrayList<>();
// If a template requires HVM and a host doesn't support HVM, remove it from consideration
List<Host> hostsToCheck = new ArrayList<Host>();
List<Host> hostsToCheck = new ArrayList<>();
if (template.isRequiresHvm()) {
for (Host host : hosts) {
if (hostSupportsHVM(host)) {
@ -468,8 +466,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
}
// If a host is tagged with the same guest OS category as the template, move it to a high priority list
// If a host is tagged with a different guest OS category than the template, move it to a low priority list
List<Host> highPriorityHosts = new ArrayList<Host>();
List<Host> lowPriorityHosts = new ArrayList<Host>();
List<Host> highPriorityHosts = new ArrayList<>();
List<Host> lowPriorityHosts = new ArrayList<>();
for (Host host : hostsToCheck) {
String hostGuestOSCategory = getHostGuestOSCategory(host);
if (hostGuestOSCategory == null) {
@ -502,7 +500,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
// if service offering is not GPU enabled then move all the GPU enabled hosts to the end of priority list.
if (_serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString()) == null) {
List<Host> gpuEnabledHosts = new ArrayList<Host>();
List<Host> gpuEnabledHosts = new ArrayList<>();
// Check for GPU enabled hosts.
for (Host host : prioritizedHosts) {
if (_resourceMgr.isHostGpuEnabled(host.getId())) {

View File

@ -26,6 +26,7 @@ import java.util.Set;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@ -73,7 +74,7 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator {
public List<Host> allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo) {
List<Host> hosts = super.allocateTo(vm, plan, type, avoid, returnUpTo);
if (hosts != null && !hosts.isEmpty()) {
if (CollectionUtils.isNotEmpty(hosts)) {
return hosts;
}

View File

@ -1103,8 +1103,8 @@ public class ApiDBUtils {
return null;
}
public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId) {
ServiceOfferingVO off = s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId);
/**
 * Looks up the service offering tied to the given compute-only disk offering,
 * optionally including removed offerings in the search. Delegates to the service offering DAO.
 */
public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId, boolean includingRemoved) {
    return s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId, includingRemoved);
}
public static DomainVO findDomainById(Long domainId) {

View File

@ -3127,7 +3127,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
@Override
public ListResponse<ImageStoreResponse> searchForImageStores(ListImageStoresCmd cmd) {
Pair<List<ImageStoreJoinVO>, Integer> result = searchForImageStoresInternal(cmd);
ListResponse<ImageStoreResponse> response = new ListResponse<ImageStoreResponse>();
ListResponse<ImageStoreResponse> response = new ListResponse<>();
List<ImageStoreResponse> poolResponses = ViewResponseHelper.createImageStoreResponse(result.first().toArray(new ImageStoreJoinVO[result.first().size()]));
response.setResponses(poolResponses, result.second());

View File

@ -186,8 +186,8 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation<VolumeJo
if (volume.getDiskOfferingId() > 0) {
DiskOffering computeOnlyDiskOffering = ApiDBUtils.findComputeOnlyDiskOfferingById(volume.getDiskOfferingId());
if (computeOnlyDiskOffering != null) {
ServiceOffering serviceOffering = ApiDBUtils.findServiceOfferingByComputeOnlyDiskOffering(volume.getDiskOfferingId());
ServiceOffering serviceOffering = getServiceOfferingForDiskOffering(volume, computeOnlyDiskOffering);
if (serviceOffering != null) {
volResponse.setServiceOfferingId(String.valueOf(serviceOffering.getId()));
volResponse.setServiceOfferingName(serviceOffering.getName());
volResponse.setServiceOfferingDisplayText(serviceOffering.getDisplayText());
@ -281,6 +281,26 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation<VolumeJo
return volResponse;
}
/**
 * Gets the {@link ServiceOffering} for the {@link DiskOffering} of the given volume.
 * It first searches among existing (non-removed) service offerings;
 * if none is found, it falls back to searching removed ones.
 *
 * @param volume the volume whose disk offering id is used for the lookup
 * @param computeOnlyDiskOffering the compute-only disk offering of the volume; when null, only the removed-inclusive lookup is attempted
 * @return the resulting service offering, or null when no match is found
 */
private static ServiceOffering getServiceOfferingForDiskOffering(VolumeJoinVO volume, DiskOffering computeOnlyDiskOffering) {
    ServiceOffering serviceOffering = null;
    if (computeOnlyDiskOffering != null) {
        serviceOffering = ApiDBUtils.findServiceOfferingByComputeOnlyDiskOffering(volume.getDiskOfferingId(), false);
    }
    if (serviceOffering == null) {
        // Fall back to removed offerings so responses can still be populated for historical volumes.
        serviceOffering = ApiDBUtils.findServiceOfferingByComputeOnlyDiskOffering(volume.getDiskOfferingId(), true);
    }
    return serviceOffering;
}
@Override
public VolumeResponse setVolumeResponse(ResponseView view, VolumeResponse volData, VolumeJoinVO vol) {
long tag_id = vol.getTagId();

View File

@ -496,7 +496,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
Pod pod = _podDao.findById(host.getPodId());
Cluster cluster = _clusterDao.findById(host.getClusterId());
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>(), displayStorage);
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), displayStorage);
logger.debug("Returning Deployment Destination: {}.", dest);
return dest;
}
@ -517,7 +517,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host.getUuid(), vm.getUuid());
return null;
}
List<Host> suitableHosts = new ArrayList<Host>();
List<Host> suitableHosts = new ArrayList<>();
suitableHosts.add(host);
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
suitableHosts, suitableVolumeStoragePools, avoids,
@ -610,7 +610,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile);
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>(),
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(),
displayStorage);
logger.debug("Returning Deployment Destination: {}.", dest);
return dest;
@ -625,7 +625,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
List<Volume> readyAndReusedVolumes = result.second();
if (!suitableVolumeStoragePools.isEmpty()) {
List<Host> suitableHosts = new ArrayList<Host>();
List<Host> suitableHosts = new ArrayList<>();
suitableHosts.add(host);
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts,
suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids),
@ -867,9 +867,9 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
long vmAccountId = vm.getAccountId();
long vmDomainId = vm.getDomainId();
List<Long> allPodsFromDedicatedID = new ArrayList<Long>();
List<Long> allClustersFromDedicatedID = new ArrayList<Long>();
List<Long> allHostsFromDedicatedID = new ArrayList<Long>();
List<Long> allPodsFromDedicatedID = new ArrayList<>();
List<Long> allClustersFromDedicatedID = new ArrayList<>();
List<Long> allHostsFromDedicatedID = new ArrayList<>();
List<AffinityGroupDomainMapVO> domainGroupMappings = _affinityGroupDomainMapDao.listByDomain(vmDomainId);
@ -999,7 +999,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
final PlannerResourceUsage hostResourceTypeFinal = hostResourceType;
// reserve the host for required resourceType
// let us lock the reservation entry before updating.
return Transaction.execute(new TransactionCallback<Boolean>() {
return Transaction.execute(new TransactionCallback<>() {
@Override
public Boolean doInTransaction(TransactionStatus status) {
final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true);
@ -1091,7 +1091,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
final long id = reservationEntry.getId();
return Transaction.execute(new TransactionCallback<Boolean>() {
return Transaction.execute(new TransactionCallback<>() {
@Override
public Boolean doInTransaction(TransactionStatus status) {
final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true);
@ -1307,7 +1307,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
// if found suitable hosts in this cluster, find suitable storage
// pools for each volume of the VM
if (suitableHosts != null && !suitableHosts.isEmpty()) {
if (CollectionUtils.isNotEmpty(suitableHosts)) {
if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
return dest;
@ -1453,7 +1453,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
}
}
return new Pair<Boolean, Boolean>(requiresShared, requiresLocal);
return new Pair<>(requiresShared, requiresLocal);
}
protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools,
@ -1465,10 +1465,10 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
boolean hostAffinityCheck = false;
if (readyAndReusedVolumes == null) {
readyAndReusedVolumes = new ArrayList<Volume>();
readyAndReusedVolumes = new ArrayList<>();
}
Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();
TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
Map<Volume, StoragePool> storage = new HashMap<>();
TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<>(new Comparator<>() {
@Override
public int compare(Volume v1, Volume v2) {
if (v1.getSize() < v2.getSize())
@ -1481,7 +1481,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;
boolean deployAsIs = isDeployAsIs(vm);
for (Host potentialHost : suitableHosts) {
Map<StoragePool, List<Volume>> volumeAllocationMap = new HashMap<StoragePool, List<Volume>>();
Map<StoragePool, List<Volume>> volumeAllocationMap = new HashMap<>();
if (deployAsIs) {
storage = new HashMap<>();
// Find the common suitable pools
@ -1553,7 +1553,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
if (volumeAllocationMap.containsKey(potentialSPool))
requestVolumes = volumeAllocationMap.get(potentialSPool);
else
requestVolumes = new ArrayList<Volume>();
requestVolumes = new ArrayList<>();
requestVolumes.add(vol);
List<Pair<Volume, DiskProfile>> volumeDiskProfilePair = getVolumeDiskProfilePairs(requestVolumes);
if (potentialHost.getHypervisorType() == HypervisorType.VMware) {
@ -1603,7 +1603,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() +
" and associated storage pools for this VM");
volumeAllocationMap.clear();
return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
return new Pair<>(potentialHost, storage);
} else {
logger.debug("Adding host [{}] to the avoid set because: can access Pool [{}], has enough space [{}], affinity check [{}], fits planner [{}] usage [{}].",
potentialHost.getUuid(), hostCanAccessPool, haveEnoughSpace, hostAffinityCheck, resourceUsageRequired.getClass().getSimpleName(), hostFitsPlannerUsage);
@ -1655,21 +1655,20 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
}
protected List<Host> findSuitableHosts(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
List<Host> suitableHosts = new ArrayList<Host>();
List<Host> suitableHosts = new ArrayList<>();
for (HostAllocator allocator : _hostAllocators) {
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo);
if (suitableHosts != null && !suitableHosts.isEmpty()) {
if (CollectionUtils.isNotEmpty(suitableHosts)) {
break;
}
}
if (suitableHosts.isEmpty()) {
logger.debug("No suitable hosts found");
if (CollectionUtils.isEmpty(suitableHosts)) {
logger.debug("No suitable hosts found.");
} else {
reorderHostsByPriority(plan.getHostPriorities(), suitableHosts);
}
// re-order hosts by priority
reorderHostsByPriority(plan.getHostPriorities(), suitableHosts);
return suitableHosts;
}
@ -1698,8 +1697,8 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
protected Pair<Map<Volume, List<StoragePool>>, List<Volume>> findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid,
int returnUpTo) {
List<VolumeVO> volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId());
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<>();
List<Volume> readyAndReusedVolumes = new ArrayList<>();
// There should be at least the ROOT volume of the VM in usable state
if (volumesTobeCreated.isEmpty()) {
@ -1784,7 +1783,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
}
}
HashSet<Long> toRemove = new HashSet<Long>();
HashSet<Long> toRemove = new HashSet<>();
for (List<StoragePool> lsp : suitableVolumeStoragePools.values()) {
for (StoragePool sp : lsp) {
toRemove.add(sp.getId());
@ -1800,7 +1799,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
logger.debug("No suitable pools found");
}
return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
return new Pair<>(suitableVolumeStoragePools, readyAndReusedVolumes);
}
private boolean tryToFindPotentialPoolsToAlocateVolume(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo,
@ -1824,7 +1823,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
Map<Volume, List<StoragePool>> suitableVolumeStoragePools, List<Volume> readyAndReusedVolumes,
VolumeVO toBeCreated) {
logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. Checking if this pool can be reused.", toBeCreated.getUuid(), vmProfile.getUuid(), toBeCreated.getPoolId());
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
List<StoragePool> suitablePools = new ArrayList<>();
StoragePool pool = null;
if (toBeCreated.getPoolId() != null) {
pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId());
@ -1951,7 +1950,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
final VirtualMachine vm = vmProfile.getVirtualMachine();
final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
return Transaction.execute(new TransactionCallback<String>() {
return Transaction.execute(new TransactionCallback<>() {
@Override
public String doInTransaction(TransactionStatus status) {
boolean saveReservation = true;
@ -1977,7 +1976,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
if (planner != null) {
vmReservation.setDeploymentPlanner(planner.getName());
}
Map<Long, Long> volumeReservationMap = new HashMap<Long, Long>();
Map<Long, Long> volumeReservationMap = new HashMap<>();
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
for (Volume vo : plannedDestination.getStorageForDisks().keySet()) {

View File

@ -138,7 +138,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
return null;
}
List<Long> clusterList = new ArrayList<Long>();
List<Long> clusterList = new ArrayList<>();
if (plan.getClusterId() != null) {
Long clusterIdSpecified = plan.getClusterId();
logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified);
@ -209,7 +209,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
}
private void reorderClustersBasedOnImplicitTags(List<Long> clusterList, int requiredCpu, long requiredRam) {
final HashMap<Long, Long> UniqueTagsInClusterMap = new HashMap<Long, Long>();
final HashMap<Long, Long> UniqueTagsInClusterMap = new HashMap<>();
Long uniqueTags;
for (Long clusterId : clusterList) {
uniqueTags = (long) 0;
@ -220,7 +220,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
}
UniqueTagsInClusterMap.put(clusterId, uniqueTags);
}
Collections.sort(clusterList, new Comparator<Long>() {
Collections.sort(clusterList, new Comparator<>() {
@Override
public int compare(Long o1, Long o2) {
Long t1 = UniqueTagsInClusterMap.get(o1);
@ -249,7 +249,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
int requiredCpu = offering.getCpu() * offering.getSpeed();
long requiredRam = offering.getRamSize() * 1024L * 1024L;
//list pods under this zone by cpu and ram capacity
List<Long> prioritizedPodIds = new ArrayList<Long>();
List<Long> prioritizedPodIds;
Pair<List<Long>, Map<Long, Double>> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam);
List<Long> podsWithCapacity = podCapacityInfo.first();
@ -277,7 +277,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
return null;
}
List<Long> clusterList = new ArrayList<Long>();
List<Long> clusterList = new ArrayList<>();
//loop over pods
for (Long podId : prioritizedPodIds) {
logger.debug("Checking resources under Pod: " + podId);
@ -298,7 +298,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
private Map<Short, Float> getCapacityThresholdMap() {
// Lets build this real time so that the admin won't have to restart MS
// if anyone changes these values
Map<Short, Float> disableThresholdMap = new HashMap<Short, Float>();
Map<Short, Float> disableThresholdMap = new HashMap<>();
String cpuDisableThresholdString = ClusterCPUCapacityDisableThreshold.value().toString();
float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F);
@ -312,7 +312,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
}
private List<Short> getCapacitiesForCheckingThreshold() {
List<Short> capacityList = new ArrayList<Short>();
List<Short> capacityList = new ArrayList<>();
capacityList.add(Capacity.CAPACITY_TYPE_CPU);
capacityList.add(Capacity.CAPACITY_TYPE_MEMORY);
return capacityList;
@ -339,7 +339,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
}
List<Short> capacityList = getCapacitiesForCheckingThreshold();
List<Long> clustersCrossingThreshold = new ArrayList<Long>();
List<Long> clustersCrossingThreshold = new ArrayList<>();
ServiceOffering offering = vmProfile.getServiceOffering();
int cpu_requested = offering.getCpu() * offering.getSpeed();
@ -523,7 +523,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
matchingClusters.addAll(hostDao.findClustersThatMatchHostTagRule(hostTagOnOffering));
if (matchingClusters.isEmpty()) {
logger.error(String.format("No suitable host found for the following compute offering tags [%s].", hostTagOnOffering));
logger.error("No suitable host found for the following compute offering tags [{}].", hostTagOnOffering);
throw new CloudRuntimeException("No suitable host found.");
}

View File

@ -149,7 +149,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc
public boolean implementVpc(final Vpc vpc, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException,
InsufficientCapacityException {
final Map<VirtualMachineProfile.Param, Object> params = new HashMap<VirtualMachineProfile.Param, Object>(1);
final Map<VirtualMachineProfile.Param, Object> params = new HashMap<>(1);
params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true);
if (vpc.isRollingRestart()) {
@ -194,7 +194,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc
return false;
}
final Map<VirtualMachineProfile.Param, Object> params = new HashMap<VirtualMachineProfile.Param, Object>(1);
final Map<VirtualMachineProfile.Param, Object> params = new HashMap<>(1);
params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true);
if (network.isRollingRestart()) {
@ -221,24 +221,58 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc
return true;
}
protected void configureGuestNetwork(final Network network, final List<DomainRouterVO> routers )
protected boolean configureGuestNetworkForRouter(final Network network,
final DomainRouterVO router) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException {
if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) {
final Map<VirtualMachineProfile.Param, Object> paramsForRouter = new HashMap<>(1);
if (network.getState() == State.Setup) {
paramsForRouter.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true);
}
if (!_vpcRouterMgr.addVpcRouterToGuestNetwork(router, network, paramsForRouter)) {
logger.error("Failed to add VPC router {} to guest network {}", router, network);
return false;
} else {
logger.debug("Successfully added VPC router {} to guest network {}", router, network);
return true;
}
}
return true;
}
protected void configureGuestNetwork(final Network network, final List<DomainRouterVO> routers)
throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException {
logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!");
logger.info("Adding VPC routers to Guest Network: {} to be added!", routers.size());
for (final DomainRouterVO router : routers) {
List<DomainRouterVO> backupRouters = new ArrayList<>();
List<DomainRouterVO> remainingRouters = new ArrayList<>();
for (DomainRouterVO router : routers) {
if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) {
final Map<VirtualMachineProfile.Param, Object> paramsForRouter = new HashMap<VirtualMachineProfile.Param, Object>(1);
if (network.getState() == State.Setup) {
paramsForRouter.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true);
}
if (!_vpcRouterMgr.addVpcRouterToGuestNetwork(router, network, paramsForRouter)) {
logger.error("Failed to add VPC router " + router + " to guest network " + network);
if (router.getRedundantState().equals(DomainRouterVO.RedundantState.BACKUP)) {
backupRouters.add(router);
} else {
logger.debug("Successfully added VPC router " + router + " to guest network " + network);
remainingRouters.add(router);
}
}
}
for (final DomainRouterVO router : backupRouters) {
if (network.getState() != State.Setup) {
if (!_vpcRouterMgr.stopKeepAlivedOnRouter(router, network)) {
logger.error("Failed to stop keepalived on VPC router {} to guest network {}", router, network);
} else {
logger.debug("Successfully stopped keepalived on VPC router {} to guest network {}", router, network);
}
}
}
for (final DomainRouterVO router : remainingRouters) {
configureGuestNetworkForRouter(network, router);
}
for (final DomainRouterVO router : backupRouters) {
if (!configureGuestNetworkForRouter(network, router) && !_vpcRouterMgr.startKeepAlivedOnRouter(router, network)) {
logger.error("Failed to start keepalived on VPC router {} to guest network {}", router, network);
}
}
}
@Override
@ -258,7 +292,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc
}
if (vm.getType() == VirtualMachine.Type.User) {
final Map<VirtualMachineProfile.Param, Object> params = new HashMap<VirtualMachineProfile.Param, Object>(1);
final Map<VirtualMachineProfile.Param, Object> params = new HashMap<>(1);
params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true);
final RouterDeploymentDefinition routerDeploymentDefinition = routerDeploymentDefinitionBuilder.create()
@ -283,30 +317,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc
@Override
public boolean shutdown(final Network network, final ReservationContext context, final boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException {
final Long vpcId = network.getVpcId();
if (vpcId == null) {
logger.debug("Network " + network + " doesn't belong to any vpc, so skipping unplug nic part");
return true;
}
boolean success = true;
final List<? extends VirtualRouter> routers = _routerDao.listByVpcId(vpcId);
for (final VirtualRouter router : routers) {
// 1) Check if router is already a part of the network
if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) {
logger.debug("Router " + router + " is not a part the network " + network);
continue;
}
// 2) Call unplugNics in the network service
success = success && _vpcRouterMgr.removeVpcRouterFromGuestNetwork(router, network);
if (!success) {
logger.warn("Failed to unplug nic in network " + network + " for virtual router " + router);
} else {
logger.debug("Successfully unplugged nic in network " + network + " for virtual router " + router);
}
}
return success;
return destroy(network, context);
}
@Override
@ -385,16 +396,16 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc
}
private static Map<Service, Map<Capability, String>> setCapabilities() {
final Map<Service, Map<Capability, String>> capabilities = new HashMap<Service, Map<Capability, String>>();
final Map<Service, Map<Capability, String>> capabilities = new HashMap<>();
capabilities.putAll(VirtualRouterElement.capabilities);
final Map<Capability, String> sourceNatCapabilities = new HashMap<Capability, String>();
final Map<Capability, String> sourceNatCapabilities = new HashMap<>();
sourceNatCapabilities.putAll(capabilities.get(Service.SourceNat));
// TODO This kind of logic is already placed in the DB
sourceNatCapabilities.put(Capability.RedundantRouter, "true");
capabilities.put(Service.SourceNat, sourceNatCapabilities);
final Map<Capability, String> vpnCapabilities = new HashMap<Capability, String>();
final Map<Capability, String> vpnCapabilities = new HashMap<>();
vpnCapabilities.putAll(capabilities.get(Service.Vpn));
vpnCapabilities.put(Capability.VpnTypes, "s2svpn");
capabilities.put(Service.Vpn, vpnCapabilities);
@ -667,7 +678,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc
final NetworkTopology networkTopology = networkTopologyContext.retrieveNetworkTopology(dcVO);
String[] result = null;
final List<String> combinedResults = new ArrayList<String>();
final List<String> combinedResults = new ArrayList<>();
for (final DomainRouterVO domainRouterVO : routers) {
result = networkTopology.applyVpnUsers(vpn, users, domainRouterVO);
combinedResults.addAll(Arrays.asList(result));

View File

@ -30,6 +30,8 @@ import javax.naming.ConfigurationException;
import com.cloud.network.dao.NetworkDao;
import com.cloud.network.vpc.dao.VpcDao;
import org.apache.cloudstack.agent.routing.ManageServiceCommand;
import com.cloud.agent.api.routing.NetworkElementCommand;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
@ -231,6 +233,54 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian
return result;
}
@Override
public boolean stopKeepAlivedOnRouter(VirtualRouter router,
Network network) throws ConcurrentOperationException, ResourceUnavailableException {
return manageKeepalivedServiceOnRouter(router, network, "stop");
}
@Override
public boolean startKeepAlivedOnRouter(VirtualRouter router,
Network network) throws ConcurrentOperationException, ResourceUnavailableException {
return manageKeepalivedServiceOnRouter(router, network, "start");
}
private boolean manageKeepalivedServiceOnRouter(VirtualRouter router,
Network network, String action) throws ConcurrentOperationException, ResourceUnavailableException {
if (network.getTrafficType() != TrafficType.Guest) {
logger.warn("Network {} is not of type {}", network, TrafficType.Guest);
return false;
}
boolean result = true;
try {
if (router.getState() == State.Running) {
final ManageServiceCommand stopCommand = new ManageServiceCommand("keepalived", action);
stopCommand.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId()));
final Commands cmds = new Commands(Command.OnError.Stop);
cmds.addCommand("manageKeepalived", stopCommand);
_nwHelper.sendCommandsToRouter(router, cmds);
final Answer setupAnswer = cmds.getAnswer("manageKeepalived");
if (!(setupAnswer != null && setupAnswer.getResult())) {
logger.warn("Unable to {} keepalived on router {}", action, router);
result = false;
}
} else if (router.getState() == State.Stopped || router.getState() == State.Stopping) {
logger.debug("Router {} is in {}, so not sending command to the backend", router.getInstanceName(), router.getState());
} else {
String message = "Unable to " + action + " keepalived on virtual router [" + router + "] is not in the right state " + router.getState();
logger.warn(message);
throw new ResourceUnavailableException(message, DataCenter.class, router.getDataCenterId());
}
} catch (final Exception ex) {
logger.warn("Failed to {} keepalived on router {} to network {} due to {}", action, router, network, ex.getLocalizedMessage());
logger.debug("Failed to {} keepalived on router {} to network {}", action, router, network, ex);
result = false;
}
return result;
}
protected boolean setupVpcGuestNetwork(final Network network, final VirtualRouter router, final boolean add, final NicProfile guestNic) throws ConcurrentOperationException,
ResourceUnavailableException {

View File

@ -844,10 +844,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
protected StateMachine2<State, VirtualMachine.Event, VirtualMachine> _stateMachine;
static final String FOR_SYSTEMVMS = "forsystemvms";
static final ConfigKey<Integer> vmPasswordLength = new ConfigKey<Integer>("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false);
static final ConfigKey<Integer> sshKeyLength = new ConfigKey<Integer>("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global);
static final ConfigKey<Boolean> humanReadableSizes = new ConfigKey<Boolean>("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global);
public static final ConfigKey<String> customCsIdentifier = new ConfigKey<String>("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack installation", true, ConfigKey.Scope.Global);
static final ConfigKey<Integer> vmPasswordLength = new ConfigKey<>("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false);
static final ConfigKey<Integer> sshKeyLength = new ConfigKey<>("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global);
static final ConfigKey<Boolean> humanReadableSizes = new ConfigKey<>("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global);
public static final ConfigKey<String> customCsIdentifier = new ConfigKey<>("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack installation", true, ConfigKey.Scope.Global);
private static final VirtualMachine.Type []systemVmTypes = { VirtualMachine.Type.SecondaryStorageVm, VirtualMachine.Type.ConsoleProxy};
private static final List<HypervisorType> LIVE_MIGRATION_SUPPORTING_HYPERVISORS = List.of(HypervisorType.Hyperv, HypervisorType.KVM,
HypervisorType.LXC, HypervisorType.Ovm, HypervisorType.Ovm3, HypervisorType.Simulator, HypervisorType.VMware, HypervisorType.XenServer);
@ -1034,7 +1034,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
protected List<DeploymentPlanner> _planners;
private final List<HypervisorType> supportedHypervisors = new ArrayList<HypervisorType>();
private final List<HypervisorType> supportedHypervisors = new ArrayList<>();
public List<DeploymentPlanner> getPlanners() {
return _planners;
@ -1112,7 +1112,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final String[] availableIds = TimeZone.getAvailableIDs();
_availableIdsMap = new HashMap<String, Boolean>(availableIds.length);
_availableIdsMap = new HashMap<>(availableIds.length);
for (final String id : availableIds) {
_availableIdsMap.put(id, true);
}
@ -1196,7 +1196,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
final Account caller = getCaller();
final List<Long> ids = cmd.getIds();
boolean result = true;
List<Long> permittedAccountIds = new ArrayList<Long>();
List<Long> permittedAccountIds = new ArrayList<>();
if (_accountService.isNormalUser(caller.getId()) || caller.getType() == Account.Type.PROJECT) {
permittedAccountIds.add(caller.getId());
@ -1211,8 +1211,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
_accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, false, sameOwnerEvents);
if (ids != null && events.size() < ids.size()) {
result = false;
return result;
return false;
}
_eventDao.archiveEvents(events);
return result;
@ -1223,7 +1222,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
final Account caller = getCaller();
final List<Long> ids = cmd.getIds();
boolean result = true;
List<Long> permittedAccountIds = new ArrayList<Long>();
List<Long> permittedAccountIds = new ArrayList<>();
if (_accountMgr.isNormalUser(caller.getId()) || caller.getType() == Account.Type.PROJECT) {
permittedAccountIds.add(caller.getId());
@ -1238,8 +1237,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
_accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, false, sameOwnerEvents);
if (ids != null && events.size() < ids.size()) {
result = false;
return result;
return false;
}
for (final EventVO event : events) {
_eventDao.remove(event.getId());
@ -1322,7 +1320,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final Pair<List<ClusterVO>, Integer> result = _clusterDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends Cluster>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
private HypervisorType getHypervisorType(VMInstanceVO vm, StoragePool srcVolumePool) {
@ -1362,7 +1360,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
final Pair<List<HostVO>, Integer> result = searchForServers(cmd.getStartIndex(), cmd.getPageSizeVal(), name, type, state, zoneId, pod,
cluster, id, keyword, resourceState, haHosts, null, null);
return new Pair<List<? extends Host>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
protected Pair<Boolean, List<HostVO>> filterUefiHostsForMigration(List<HostVO> allHosts, List<HostVO> filteredHosts, VirtualMachine vm) {
@ -1596,20 +1594,17 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
}
if (suitableHosts != null && !suitableHosts.isEmpty()) {
if (CollectionUtils.isNotEmpty(suitableHosts)) {
break;
}
}
// re-order hosts by priority
_dpMgr.reorderHostsByPriority(plan.getHostPriorities(), suitableHosts);
if (logger.isDebugEnabled()) {
if (suitableHosts.isEmpty()) {
logger.debug("No suitable hosts found");
} else {
logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts);
}
if (suitableHosts.isEmpty()) {
logger.warn("No suitable hosts found.");
} else {
logger.debug("Hosts having capacity and suitable for migration: {}", suitableHosts);
}
return new Ternary<>(otherHosts, suitableHosts, requiresStorageMotion);
@ -1660,9 +1655,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
StoragePool datastoreCluster = _poolDao.findById(srcVolumePool.getParent());
avoidPools.add(datastoreCluster);
}
abstractDataStoreClustersList((List<StoragePool>) allPools, new ArrayList<StoragePool>());
abstractDataStoreClustersList((List<StoragePool>) allPools, new ArrayList<>());
abstractDataStoreClustersList((List<StoragePool>) suitablePools, avoidPools);
return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
return new Pair<>(allPools, suitablePools);
}
@Override
@ -1694,13 +1689,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
// Volume must be attached to an instance for live migration.
List<? extends StoragePool> allPools = new ArrayList<StoragePool>();
List<? extends StoragePool> suitablePools = new ArrayList<StoragePool>();
List<? extends StoragePool> allPools = new ArrayList<>();
List<? extends StoragePool> suitablePools = new ArrayList<>();
// Volume must be in Ready state to be migrated.
if (!Volume.State.Ready.equals(volume.getState())) {
logger.info("Volume " + volume + " must be in ready state for migration.");
return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
return new Pair<>(allPools, suitablePools);
}
final Long instanceId = volume.getInstanceId();
@ -1736,7 +1731,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
if (!storageMotionSupported) {
logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion.");
return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
return new Pair<>(allPools, suitablePools);
}
}
@ -1759,7 +1754,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
removeDataStoreClusterParents((List<StoragePool>) allPools);
removeDataStoreClusterParents((List<StoragePool>) suitablePools);
return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
return new Pair<>(allPools, suitablePools);
}
private void removeDataStoreClusterParents(List<StoragePool> storagePools) {
@ -2034,7 +2029,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final Pair<List<HostPodVO>, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends Pod>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
@Override
@ -2172,7 +2167,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final Pair<List<VlanVO>, Integer> result = _vlanDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends Vlan>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
@Override
@ -2305,7 +2300,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
if (scope != null && !scope.isEmpty()) {
// Populate values corresponding the resource id
final List<ConfigurationVO> configVOList = new ArrayList<ConfigurationVO>();
final List<ConfigurationVO> configVOList = new ArrayList<>();
for (final ConfigurationVO param : result.first()) {
final ConfigurationVO configVo = _configDao.findByName(param.getName());
if (configVo != null) {
@ -2327,10 +2322,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
}
return new Pair<List<? extends Configuration>, Integer>(configVOList, configVOList.size());
return new Pair<>(configVOList, configVOList.size());
}
return new Pair<List<? extends Configuration>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
@Override
@ -2344,7 +2339,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final Pair<List<ConfigurationGroupVO>, Integer> result = _configGroupDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends ConfigurationGroup>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
@Override
@ -2556,7 +2551,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
Collections.sort(addrs, Comparator.comparing(IPAddressVO::getAddress));
List<? extends IpAddress> wPagination = com.cloud.utils.StringUtils.applyPagination(addrs, cmd.getStartIndex(), cmd.getPageSizeVal());
if (wPagination != null) {
return new Pair<List<? extends IpAddress>, Integer>(wPagination, addrs.size());
return new Pair<>(wPagination, addrs.size());
}
return new Pair<>(addrs, addrs.size());
}
@ -2728,7 +2723,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final Pair<List<GuestOSCategoryVO>, Integer> result = _guestOSCategoryDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends GuestOsCategory>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
@Override
@ -2781,7 +2776,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final Pair<List<GuestOSHypervisorVO>, Integer> result = _guestOSHypervisorDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends GuestOSHypervisor>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
@Override
@ -3147,7 +3142,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
public Pair<String, Integer> getVncPort(final VirtualMachine vm) {
if (vm.getHostId() == null) {
logger.warn("VM " + vm.getHostName() + " does not have host, return -1 for its VNC port");
return new Pair<String, Integer>(null, -1);
return new Pair<>(null, -1);
}
if (logger.isTraceEnabled()) {
@ -3161,10 +3156,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
answer = (GetVncPortAnswer)_agentMgr.easySend(vm.getHostId(), new GetVncPortCommand(vm.getId(), vm.getInstanceName()));
}
if (answer != null && answer.getResult()) {
return new Pair<String, Integer>(answer.getAddress(), answer.getPort());
return new Pair<>(answer.getAddress(), answer.getPort());
}
return new Pair<String, Integer>(null, -1);
return new Pair<>(null, -1);
}
@Override
@ -3202,21 +3197,19 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
sc.addAnd("archived", SearchCriteria.Op.EQ, false);
final Pair<List<AlertVO>, Integer> result = _alertDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends Alert>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
/**
 * Archives the alerts matching the command's filters (ids, type, date range),
 * scoped to the zone the calling account is authorized for.
 *
 * @param cmd filter parameters: alert ids, type, start/end date
 * @return true when the DAO reports the archive operation succeeded
 */
@Override
public boolean archiveAlerts(final ArchiveAlertsCmd cmd) {
    // Resolve the caller's authority scope; null zone means no zone restriction applies.
    final Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), null);
    return _alertDao.archiveAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId);
}
/**
 * Deletes the alerts matching the command's filters (ids, type, date range),
 * scoped to the zone the calling account is authorized for.
 *
 * @param cmd filter parameters: alert ids, type, start/end date
 * @return true when the DAO reports the delete operation succeeded
 */
@Override
public boolean deleteAlerts(final DeleteAlertsCmd cmd) {
    // Resolve the caller's authority scope; null zone means no zone restriction applies.
    final Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), null);
    return _alertDao.deleteAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId);
}
Pair<Boolean, List<Long>> getHostIdsForCapacityListing(Long zoneId, Long podId, Long clusterId, Integer capacityType, String tag) {
@ -3482,7 +3475,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Override
public List<Class<?>> getCommands() {
final List<Class<?>> cmdList = new ArrayList<Class<?>>();
final List<Class<?>> cmdList = new ArrayList<>();
cmdList.add(CreateAccountCmd.class);
cmdList.add(DeleteAccountCmd.class);
cmdList.add(DisableAccountCmd.class);
@ -4251,7 +4244,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final Pair<List<VMInstanceVO>, Integer> result = _vmInstanceDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends VirtualMachine>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
@Override
@ -4417,7 +4410,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
logger.warn("Exception whilst creating a signature:" + e);
}
final ArrayList<String> cloudParams = new ArrayList<String>();
final ArrayList<String> cloudParams = new ArrayList<>();
cloudParams.add(cloudIdentifier);
cloudParams.add(signature);
@ -4426,7 +4419,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Override
public Map<String, Object> listCapabilities(final ListCapabilitiesCmd cmd) {
final Map<String, Object> capabilities = new HashMap<String, Object>();
final Map<String, Object> capabilities = new HashMap<>();
final Account caller = getCaller();
boolean securityGroupsEnabled = false;
@ -4598,7 +4591,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Override
public List<String> getHypervisors(final Long zoneId) {
final List<String> result = new ArrayList<String>();
final List<String> result = new ArrayList<>();
final String hypers = _configDao.getValue(Config.HypervisorList.key());
final String[] hypervisors = hypers.split(",");
@ -4810,9 +4803,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
final String keyword = cmd.getKeyword();
final Account caller = getCaller();
final List<Long> permittedAccounts = new ArrayList<Long>();
final List<Long> permittedAccounts = new ArrayList<>();
final Ternary<Long, Boolean, ListProjectResourcesCriteria> domainIdRecursiveListProject = new Ternary<Long, Boolean, ListProjectResourcesCriteria>(cmd.getDomainId(), cmd.isRecursive(), null);
final Ternary<Long, Boolean, ListProjectResourcesCriteria> domainIdRecursiveListProject = new Ternary<>(cmd.getDomainId(), cmd.isRecursive(), null);
_accountMgr.buildACLSearchParameters(caller, null, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false);
final Long domainId = domainIdRecursiveListProject.first();
final Boolean isRecursive = domainIdRecursiveListProject.second();
@ -5155,7 +5148,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
final Pair<List<HypervisorCapabilitiesVO>, Integer> result = _hypervisorCapabilitiesDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends HypervisorCapabilities>, Integer>(result.first(), result.second());
return new Pair<>(result.first(), result.second());
}
@Override
@ -5302,7 +5295,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Override
public List<String> listDeploymentPlanners() {
final List<String> plannersAvailable = new ArrayList<String>();
final List<String> plannersAvailable = new ArrayList<>();
for (final DeploymentPlanner planner : _planners) {
plannersAvailable.add(planner.getName());
}

View File

@ -1670,7 +1670,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
}
List<DataStore> stores = _dataStoreMgr.listImageStores();
ConcurrentHashMap<Long, StorageStats> storageStats = new ConcurrentHashMap<Long, StorageStats>();
ConcurrentHashMap<Long, StorageStats> storageStats = new ConcurrentHashMap<>();
for (DataStore store : stores) {
if (store.getUri() == null) {
continue;
@ -1690,7 +1690,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
logger.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes()));
}
}
_storageStats = storageStats;
updateStorageStats(storageStats);
ConcurrentHashMap<Long, StorageStats> storagePoolStats = new ConcurrentHashMap<Long, StorageStats>();
List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
@ -1740,6 +1740,19 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
logger.error("Error trying to retrieve storage stats", t);
}
}
/**
 * Reconciles freshly collected image store stats with the cached map, persisting
 * any changed capacity to the database, then installs the new map as the cache.
 *
 * @param storageStats newly collected stats, keyed by image store id
 */
private void updateStorageStats(ConcurrentHashMap<Long, StorageStats> storageStats) {
    for (Long storeId : storageStats.keySet()) {
        // ConcurrentHashMap never holds null values, so a null lookup means the key is absent.
        StorageStats previousStats = _storageStats.get(storeId);
        if (previousStats == null) {
            continue;
        }
        long previousCapacity = previousStats.getCapacityBytes();
        long currentCapacity = storageStats.get(storeId).getCapacityBytes();
        if (previousCapacity == 0L || previousCapacity != currentCapacity) {
            // Capacity changed (or was never recorded) - push the new value to the DB.
            _storageManager.updateImageStoreStatus(storeId, null, null, currentCapacity);
        }
    }
    // Stores present only in the old map are implicitly discarded by this swap.
    _storageStats = storageStats;
}
}
class AutoScaleMonitor extends ManagedContextRunnable {

View File

@ -62,6 +62,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd;
import org.apache.cloudstack.api.command.admin.storage.heuristics.RemoveSecondaryStorageSelectorCmd;
@ -138,7 +139,6 @@ import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.time.DateUtils;
import org.apache.commons.lang3.EnumUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
@ -234,6 +234,7 @@ import com.cloud.utils.DateUtil;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.UriUtils;
import com.cloud.utils.StringUtils;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
@ -407,7 +408,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
int _downloadUrlExpirationInterval;
private long _serverId;
private final Map<String, HypervisorHostListener> hostListeners = new HashMap<String, HypervisorHostListener>();
private final Map<String, HypervisorHostListener> hostListeners = new HashMap<>();
public boolean share(VMInstanceVO vm, List<VolumeVO> vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException {
@ -465,7 +466,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public List<StoragePoolVO> ListByDataCenterHypervisor(long datacenterId, HypervisorType type) {
List<StoragePoolVO> pools = _storagePoolDao.listByDataCenterId(datacenterId);
List<StoragePoolVO> retPools = new ArrayList<StoragePoolVO>();
List<StoragePoolVO> retPools = new ArrayList<>();
for (StoragePoolVO pool : pools) {
if (pool.getStatus() != StoragePoolStatus.Up) {
continue;
@ -571,7 +572,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver) storeDriver;
HashMap<String, VolumeStatsEntry> statEntry = new HashMap<String, VolumeStatsEntry>();
HashMap<String, VolumeStatsEntry> statEntry = new HashMap<>();
GetVolumeStatsCommand getVolumeStatsCommand = (GetVolumeStatsCommand) cmd;
for (String volumeUuid : getVolumeStatsCommand.getVolumeUuids()) {
Pair<Long, Long> volumeStats = primaryStoreDriver.getVolumeStats(pool, volumeUuid);
@ -793,7 +794,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
DataStoreProvider provider = _dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
if (pool == null) {
Map<String, Object> params = new HashMap<String, Object>();
Map<String, Object> params = new HashMap<>();
String name = pInfo.getName() != null ? pInfo.getName() : createLocalStoragePoolName(host, pInfo);
params.put("zoneId", host.getDataCenterId());
params.put("clusterId", host.getClusterId());
@ -909,7 +910,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId);
}
Map<String, Object> params = new HashMap<String, Object>();
Map<String, Object> params = new HashMap<>();
params.put("zoneId", zone.getId());
params.put("clusterId", clusterId);
params.put("podId", podId);
@ -1031,7 +1032,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
private Map<String, String> extractApiParamAsMap(Map ds) {
Map<String, String> details = new HashMap<String, String>();
Map<String, String> details = new HashMap<>();
if (ds != null) {
Collection detailsCollection = ds.values();
Iterator it = detailsCollection.iterator();
@ -1442,13 +1443,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
for (Long hostId : hostIds) {
try {
List<Answer> answers = new ArrayList<Answer>();
List<Answer> answers = new ArrayList<>();
Command[] cmdArray = cmds.toCommands();
for (Command cmd : cmdArray) {
long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostId, cmd);
answers.add(_agentMgr.send(targetHostId, cmd));
}
return new Pair<Long, Answer[]>(hostId, answers.toArray(new Answer[answers.size()]));
return new Pair<>(hostId, answers.toArray(new Answer[answers.size()]));
} catch (AgentUnavailableException e) {
logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e);
} catch (OperationTimedoutException e) {
@ -1463,7 +1464,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
public Pair<Long, Answer> sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List<Long> hostIdsToAvoid, Command cmd) throws StorageUnavailableException {
Commands cmds = new Commands(cmd);
Pair<Long, Answer[]> result = sendToPool(pool, hostIdsToTryFirst, hostIdsToAvoid, cmds);
return new Pair<Long, Answer>(result.first(), result.second()[0]);
return new Pair<>(result.first(), result.second()[0]);
}
private void cleanupInactiveTemplates() {
@ -1785,7 +1786,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@DB
List<Long> findAllVolumeIdInSnapshotTable(Long storeId) {
String sql = "SELECT volume_id from snapshots, snapshot_store_ref WHERE snapshots.id = snapshot_store_ref.snapshot_id and store_id=? GROUP BY volume_id";
List<Long> list = new ArrayList<Long>();
List<Long> list = new ArrayList<>();
try {
TransactionLegacy txn = TransactionLegacy.currentTxn();
ResultSet rs = null;
@ -1813,7 +1814,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setLong(1, volumeId);
rs = pstmt.executeQuery();
List<String> list = new ArrayList<String>();
List<String> list = new ArrayList<>();
while (rs.next()) {
list.add(rs.getString(1));
}
@ -2062,7 +2063,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
if (!answer.getResult()) {
throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to ", pool.getUuid(), hostId, answer.getDetails()));
throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to %s", pool.getUuid(), hostId, answer.getDetails()));
}
assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" +
@ -2297,7 +2298,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
// Prepare for the syncvolumepath command
DataTO volTO = volFactory.getVolume(volume.getId()).getTO();
DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType());
Map<String, String> details = new HashMap<String, String>();
Map<String, String> details = new HashMap<>();
details.put(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString());
disk.setDetails(details);
@ -2416,7 +2417,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId);
}
List<Long> hosts = new ArrayList<Long>();
List<Long> hosts = new ArrayList<>();
if (hostId != null) {
hosts.add(hostId);
} else {
@ -2571,11 +2572,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
if (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool)) {
return true;
}
return false;
return (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool));
}
@Override
@ -2894,7 +2891,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (CollectionUtils.isEmpty(volumes)) {
return false;
}
List<Pair<Volume, Answer>> answers = new ArrayList<Pair<Volume, Answer>>();
List<Pair<Volume, Answer>> answers = new ArrayList<>();
for (Pair<Volume, DiskProfile> volumeDiskProfilePair : volumes) {
Volume volume = volumeDiskProfilePair.first();
@ -3232,7 +3229,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
// Check if it's the only/first store in the zone
if (stores.size() == 0) {
List<HypervisorType> hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId);
Set<HypervisorType> hypSet = new HashSet<HypervisorType>(hypervisorTypes);
Set<HypervisorType> hypSet = new HashSet<>(hypervisorTypes);
TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister");
SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration();
String filePath = null;
@ -3292,7 +3289,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
public ImageStore migrateToObjectStore(String name, String url, String providerName, Map<String, String> details) throws DiscoveryException, InvalidParameterValueException {
// check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages
List<ImageStoreVO> imgStores = _imageStoreDao.listImageStores();
List<ImageStoreVO> nfsStores = new ArrayList<ImageStoreVO>();
List<ImageStoreVO> nfsStores = new ArrayList<>();
if (imgStores != null && imgStores.size() > 0) {
for (ImageStoreVO store : imgStores) {
if (!store.getProviderName().equals(DataStoreProvider.NFS_IMAGE)) {
@ -3322,20 +3319,38 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return discoverImageStore(name, url, providerName, null, details);
}
/**
 * Applies the user-requested updates from an UpdateImageStoreCmd (name, read-only
 * flag, capacity in bytes) to the image store identified by the command's id.
 * Delegates to {@link #updateImageStoreStatus(Long, String, Boolean, Long)}.
 *
 * @param cmd API command carrying the image store id and the optional new values
 * @return the updated image store
 */
@Override
public ImageStore updateImageStore(UpdateImageStoreCmd cmd) {
    return updateImageStoreStatus(cmd.getId(), cmd.getName(), cmd.getReadonly(), cmd.getCapacityBytes());
}
@Override
@ActionEvent(eventType = EventTypes.EVENT_UPDATE_IMAGE_STORE_ACCESS_STATE,
eventDescription = "image store access updated")
public ImageStore updateImageStoreStatus(Long id, Boolean readonly) {
/**
 * Updates the mutable attributes of an image store: display name, total capacity
 * in bytes, and the read-only flag. A null (or, for name, blank) argument leaves
 * the corresponding attribute unchanged.
 *
 * @param id            id of the image store to update
 * @param name          new name; ignored when null or blank
 * @param readonly      new read-only state; ignored when null
 * @param capacityBytes new total size in bytes; ignored when null
 * @return the updated image store
 * @throws IllegalArgumentException when no image store exists with the given id
 */
public ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes) {
    // Input validation
    ImageStoreVO imageStoreVO = _imageStoreDao.findById(id);
    if (imageStoreVO == null) {
        throw new IllegalArgumentException("Unable to find image store with ID: " + id);
    }
    // Fully-qualified StringUtils: the file also imports org.apache.commons.lang3.StringUtils elsewhere.
    if (com.cloud.utils.StringUtils.isNotBlank(name)) {
        imageStoreVO.setName(name);
    }
    if (capacityBytes != null) {
        imageStoreVO.setTotalSize(capacityBytes);
    }
    if (readonly != null) {
        imageStoreVO.setReadonly(readonly);
    }
    _imageStoreDao.update(id, imageStoreVO);
    return imageStoreVO;
}
/**
 * Backwards-compatible overload: updates only the read-only flag of the image
 * store, leaving its name and capacity untouched.
 *
 * @param id       id of the image store to update
 * @param readonly new read-only state; ignored when null
 * @return the updated image store
 */
@Override
public ImageStore updateImageStoreStatus(Long id, Boolean readonly) {
    return updateImageStoreStatus(id, null, readonly, null);
}
/**
* @param poolId - Storage pool id for pool to update.
* @param failOnChecks - If true, throw an error if pool type and state checks fail.
@ -3380,7 +3395,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
// find the host
List<Long> poolIds = new ArrayList<Long>();
List<Long> poolIds = new ArrayList<>();
poolIds.add(pool.getId());
List<Long> hosts = _storagePoolHostDao.findHostsConnectedToPools(poolIds);
if (hosts.size() > 0) {
@ -3417,7 +3432,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
VMTemplateZoneVO tmpltZone;
List<VMTemplateVO> allTemplates = _vmTemplateDao.listAll();
List<Long> dcIds = new ArrayList<Long>();
List<Long> dcIds = new ArrayList<>();
if (zoneId != null) {
dcIds.add(zoneId);
} else {
@ -3534,7 +3549,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
throw ex;
}
Map<String, Object> params = new HashMap<String, Object>();
Map<String, Object> params = new HashMap<>();
params.put("zoneId", dcId);
params.put("url", cmd.getUrl());
params.put("name", cmd.getUrl());
@ -3622,8 +3637,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
// Cleanup expired volume URLs
List<VolumeDataStoreVO> volumesOnImageStoreList = _volumeStoreDao.listVolumeDownloadUrls();
HashSet<Long> expiredVolumeIds = new HashSet<Long>();
HashSet<Long> activeVolumeIds = new HashSet<Long>();
HashSet<Long> expiredVolumeIds = new HashSet<>();
HashSet<Long> activeVolumeIds = new HashSet<>();
for (VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList) {
long volumeId = volumeOnImageStore.getVolumeId();

View File

@ -1986,7 +1986,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
private Volume changeDiskOfferingForVolumeInternal(VolumeVO volume, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean autoMigrateVolume, boolean shrinkOk) throws ResourceAllocationException {
DiskOfferingVO existingDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
long existingDiskOfferingId = volume.getDiskOfferingId();
DiskOfferingVO existingDiskOffering = _diskOfferingDao.findByIdIncludingRemoved(existingDiskOfferingId);
DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newDiskOfferingId);
Integer newHypervisorSnapshotReserve = null;
@ -1998,6 +1999,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
Long[] updateNewMinIops = {newMinIops};
Long[] updateNewMaxIops = {newMaxIops};
Integer[] updateNewHypervisorSnapshotReserve = {newHypervisorSnapshotReserve};
volService.validateChangeDiskOfferingEncryptionType(existingDiskOfferingId, newDiskOfferingId);
validateVolumeResizeWithNewDiskOfferingAndLoad(volume, existingDiskOffering, newDiskOffering, updateNewSize, updateNewMinIops, updateNewMaxIops, updateNewHypervisorSnapshotReserve);
newSize = updateNewSize[0];
newMinIops = updateNewMinIops[0];

View File

@ -2137,12 +2137,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
throw new InvalidParameterValueException("Unable to Scale VM, since disk offering id associated with the old service offering is not same for new service offering");
}
DiskOfferingVO currentRootDiskOffering = _diskOfferingDao.findByIdIncludingRemoved(currentServiceOffering.getDiskOfferingId());
DiskOfferingVO newRootDiskOffering = _diskOfferingDao.findById(newServiceOffering.getDiskOfferingId());
if (currentRootDiskOffering.getEncrypt() != newRootDiskOffering.getEncrypt()) {
throw new InvalidParameterValueException("Cannot change volume encryption type via service offering change");
}
_volService.validateChangeDiskOfferingEncryptionType(currentServiceOffering.getDiskOfferingId(), newServiceOffering.getDiskOfferingId());
}
private void changeDiskOfferingForRootVolume(Long vmId, DiskOfferingVO newDiskOffering, Map<String, String> customParameters, Long zoneId) throws ResourceAllocationException {

View File

@ -691,34 +691,6 @@ public class UserVmManagerImplTest {
prepareAndRunResizeVolumeTest(2L, 10L, 20L, largerDisdkOffering, smallerDisdkOffering);
}
@Test
public void validateDiskOfferingCheckForEncryption1Test() {
ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, true);
ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, true);
userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering);
}
@Test
public void validateDiskOfferingCheckForEncryption2Test() {
ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, false);
ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, false);
userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering);
}
@Test (expected = InvalidParameterValueException.class)
public void validateDiskOfferingCheckForEncryptionFail1Test() {
ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, false);
ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, true);
userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering);
}
@Test (expected = InvalidParameterValueException.class)
public void validateDiskOfferingCheckForEncryptionFail2Test() {
ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, true);
ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, false);
userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering);
}
private void prepareAndRunResizeVolumeTest(Long expectedOfferingId, long expectedMinIops, long expectedMaxIops, DiskOfferingVO currentRootDiskOffering, DiskOfferingVO newRootDiskOffering) {
long rootVolumeId = 1l;
VolumeVO rootVolumeOfVm = Mockito.mock(VolumeVO.class);
@ -742,20 +714,6 @@ public class UserVmManagerImplTest {
return newRootDiskOffering;
}
private ServiceOfferingVO prepareOfferingsForEncryptionValidation(long diskOfferingId, boolean encryption) {
ServiceOfferingVO svcOffering = Mockito.mock(ServiceOfferingVO.class);
DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class);
Mockito.when(svcOffering.getDiskOfferingId()).thenReturn(diskOfferingId);
Mockito.when(diskOffering.getEncrypt()).thenReturn(encryption);
// Be aware - Multiple calls with the same disk offering ID could conflict
Mockito.when(diskOfferingDao.findByIdIncludingRemoved(diskOfferingId)).thenReturn(diskOffering);
Mockito.when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOffering);
return svcOffering;
}
@Test (expected = CloudRuntimeException.class)
public void testUserDataDenyOverride() {
Long userDataId = 1L;

View File

@ -204,6 +204,20 @@ public class MockVpcVirtualNetworkApplianceManager extends ManagerBase implement
return false;
}
/**
 * Mock implementation for tests: performs no action on the router.
 *
 * @return always false
 */
@Override
public boolean stopKeepAlivedOnRouter(VirtualRouter router,
Network network) throws ConcurrentOperationException, ResourceUnavailableException {
    // TODO Auto-generated method stub
    return false;
}
/**
 * Mock implementation for tests: performs no action on the router.
 *
 * @return always false
 */
@Override
public boolean startKeepAlivedOnRouter(VirtualRouter router,
Network network) throws ConcurrentOperationException, ResourceUnavailableException {
    // TODO Auto-generated method stub
    return false;
}
/* (non-Javadoc)
* @see com.cloud.network.router.VpcVirtualNetworkApplianceManager#destroyPrivateGateway(com.cloud.network.vpc.PrivateGateway, com.cloud.network.router.VirtualRouter)
*/

View File

@ -529,8 +529,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
/**
* Get the default network for the secondary storage VM, based on the zone it is in. Delegates to
* either {@link #getDefaultNetworkForZone(DataCenter)} or {@link #getDefaultNetworkForAdvancedSGZone(DataCenter)},
* depending on the zone network type and whether or not security groups are enabled in the zone.
* either {@link #getDefaultNetworkForAdvancedZone(DataCenter)} or {@link #getDefaultNetworkForBasicZone(DataCenter)},
* depending on the zone network type and whether security groups are enabled in the zone.
* @param dc - The zone (DataCenter) of the secondary storage VM.
* @return The default network for use with the secondary storage VM.
*/

View File

@ -0,0 +1,67 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.storage.resource;
import com.cloud.utils.script.Script;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
/**
 * Static helper for idempotently managing iptables rules on the secondary
 * storage VM: a rule is only appended/inserted when {@code iptables -C}
 * reports it absent.
 */
public class IpTablesHelper {
    public static final Logger LOGGER = LogManager.getLogger(IpTablesHelper.class);

    public static final String OUTPUT_CHAIN = "OUTPUT";
    public static final String INPUT_CHAIN = "INPUT";
    public static final String INSERT = " -I ";
    public static final String APPEND = " -A ";

    private IpTablesHelper() {
        // Utility class; no instances.
    }

    /**
     * Checks whether the given rule is missing from the chain.
     *
     * @param chain iptables chain name (e.g. {@link #OUTPUT_CHAIN})
     * @param rule  rule specification without the chain or action flag
     * @return true when {@code iptables -C} reports no matching rule exists
     */
    public static boolean needsAdding(String chain, String rule) {
        Script command = new Script("/bin/bash", LOGGER);
        command.add("-c");
        command.add("iptables -C " + chain + " " + rule);
        String commandOutput = command.execute();
        // "iptables -C" exits non-zero with exactly this message when the rule is absent;
        // a null output means the check succeeded, i.e. the rule already exists.
        boolean needsAdding = (commandOutput != null && commandOutput.contains("iptables: Bad rule (does a matching rule exist in that chain?)."));
        LOGGER.debug("Rule [{}], {} need adding to [{}] : {}",
                rule,
                needsAdding ? "does indeed" : "doesn't",
                chain,
                commandOutput);
        return needsAdding;
    }

    /**
     * Adds the rule to the chain if it is not already present.
     *
     * @param chain  iptables chain to modify
     * @param insert true to insert at the top ({@code -I}), false to append ({@code -A})
     * @param rule   rule specification without the chain or action flag
     * @param errMsg message prefix used when the iptables invocation fails
     * @return null on success or when the rule already existed; otherwise
     *         {@code errMsg} concatenated with the iptables error output
     */
    public static String addConditionally(String chain, boolean insert, String rule, String errMsg) {
        LOGGER.info("Adding rule [{}] to [{}] if required.", rule, chain);
        if (needsAdding(chain, rule)) {
            Script command = new Script("/bin/bash", LOGGER);
            command.add("-c");
            command.add("iptables" + (insert ? INSERT : APPEND) + chain + " " + rule);
            String result = command.execute();
            LOGGER.debug("Executed [{}] with result [{}]", command, result);
            if (result != null) {
                LOGGER.warn("{} , err = {}", errMsg, result);
                return errMsg + result;
            }
        } else {
            LOGGER.warn("Rule already defined in SVM: {}", rule);
        }
        return null;
    }
}

View File

@ -247,11 +247,11 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
private String _storageNetmask;
private String _storageGateway;
private String _nfsVersion;
private final List<String> nfsIps = new ArrayList<String>();
private final List<String> nfsIps = new ArrayList<>();
protected String _parent = "/mnt/SecStorage";
final private String _tmpltpp = "template.properties";
protected String createTemplateFromSnapshotXenScript;
private HashMap<String, UploadEntity> uploadEntityStateMap = new HashMap<String, UploadEntity>();
private HashMap<String, UploadEntity> uploadEntityStateMap = new HashMap<>();
private String _ssvmPSK = null;
private long processTimeout;
@ -2330,15 +2330,14 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
if (!_inSystemVM) {
return null;
}
Script command = new Script("/bin/bash", logger);
String intf = "eth1";
command.add("-c");
command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT");
String rule = String.format("-o %s -d %s -p tcp -m state --state NEW -m tcp -j ACCEPT", intf, destCidr);
String errMsg = String.format("Error in allowing outgoing to %s", destCidr);
String result = command.execute();
logger.info("Adding rule if required: {}", rule);
String result = IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN, true, rule, errMsg);
if (result != null) {
logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result);
return "Error in allowing outgoing to " + destCidr + ", err=" + result;
return result;
}
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr);
@ -2875,13 +2874,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
if (result != null) {
logger.warn("Error in starting sshd service err=" + result);
}
command = new Script("/bin/bash", logger);
command.add("-c");
command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT");
result = command.execute();
if (result != null) {
logger.warn("Error in opening up ssh port err=" + result);
}
String rule = "-i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT";
IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, "Error in opening up ssh port");
}
private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) {

View File

@ -48,6 +48,7 @@ import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
import org.apache.cloudstack.storage.command.DownloadProgressCommand;
import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType;
import org.apache.cloudstack.storage.resource.IpTablesHelper;
import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource;
import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
import org.apache.cloudstack.utils.security.ChecksumValue;
@ -1226,17 +1227,14 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
}
private void blockOutgoingOnPrivate() {
Script command = new Script("/bin/bash", logger);
String intf = "eth1";
command.add("-c");
command.add("iptables -A OUTPUT -o " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "80" + " -j REJECT;" + "iptables -A OUTPUT -o " + intf +
" -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j REJECT;");
String result = command.execute();
if (result != null) {
logger.warn("Error in blocking outgoing to port 80/443 err=" + result);
return;
}
IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN
, false
, "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 80 -j REJECT;"
, "Error in blocking outgoing to port 80");
IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN
, false
, "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 443 -j REJECT;"
, "Error in blocking outgoing to port 443");
}
@Override
@ -1262,17 +1260,19 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
if (result != null) {
logger.warn("Error in stopping httpd service err=" + result);
}
String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT);
String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF;
command = new Script("/bin/bash", logger);
command.add("-c");
command.add("iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j ACCEPT;" + "iptables -I INPUT -i " + intf +
" -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j ACCEPT;");
result = command.execute();
result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN
, true
, "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport " + TemplateConstants.DEFAULT_TMPLT_COPY_PORT + " -j ACCEPT"
, "Error in opening up apache2 port " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE);
if (result != null) {
return;
}
result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN
, true
, "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT;"
, "Error in opening up apache2 port 443");
if (result != null) {
logger.warn("Error in opening up apache2 port err=" + result);
return;
}

View File

@ -221,7 +221,7 @@ def save_iptables(command, iptables_file):
def execute2(command, wait=True):
""" Execute command """
logging.info("Executing: %s" % command)
logging.info("Executing2: %s" % command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if wait:
p.wait()

View File

@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# Thin wrapper: run a systemctl action ($1, e.g. "restart") on a unit ($2).
# Arguments are quoted to prevent word splitting and pathname expansion.
systemctl "$1" "$2"

View File

@ -0,0 +1,191 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test cases for validating the Quota balance of accounts
"""
from marvin.cloudstackTestCase import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
class TestQuotaBalance(cloudstackTestCase):
    """Validates that the Quota service computes an account's balance from credits and tariffs."""

    @classmethod
    def setUpClass(cls):
        testClient = super(TestQuotaBalance, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        # Create Account
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls._cleanup = [
            cls.account,
        ]
        cls.services["account"] = cls.account.name
        # Quota APIs are only registered when the service is enabled; flipping the
        # setting requires a management server restart to take effect.
        if not is_config_suitable(apiclient=cls.apiclient, name='quota.enable.service', value='true'):
            cls.debug("Quota service is not enabled, therefore the configuration `quota.enable.service` will be set to `true` and the management server will be restarted.")
            Configurations.update(cls.apiclient, "quota.enable.service", "true")
            cls.restartServer()
        return

    @classmethod
    def restartServer(cls):
        """Restart management server"""
        cls.debug("Restarting management server")
        sshClient = SshClient(
            cls.mgtSvrDetails["mgtSvrIp"],
            22,
            cls.mgtSvrDetails["user"],
            cls.mgtSvrDetails["passwd"]
        )
        command = "service cloudstack-management restart"
        sshClient.execute(command)

        # Waits for management to come up in 5 mins, when it's up it will continue
        timeout = time.time() + 300
        while time.time() < timeout:
            if cls.isManagementUp() is True:
                # Give the server a little extra time to finish API registration.
                time.sleep(30)
                return
            time.sleep(5)
        # NOTE: cls.fail() cannot be used here — in a classmethod the unbound
        # TestCase.fail would receive the message as `self` and crash with an
        # AttributeError instead of failing the test, so raise explicitly.
        raise Exception("Management server did not come up, failing")

    @classmethod
    def isManagementUp(cls):
        # Probe an arbitrary lightweight API; any exception means the server is not ready.
        try:
            cls.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd())
            return True
        except Exception:
            return False

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        self.tariffs = []
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
            self.delete_tariffs()
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def delete_tariffs(self):
        # Remove every tariff created by the test so later runs start clean.
        for tariff in self.tariffs:
            cmd = quotaTariffDelete.quotaTariffDeleteCmd()
            cmd.id = tariff.uuid
            self.apiclient.quotaTariffDelete(cmd)

    @attr(tags=["advanced", "smoke", "quota"], required_hardware="false")
    def test_quota_balance(self):
        """
        Test Quota balance

        Validate the following
        1. Add credits to an account
        2. Create Quota tariff for the usage type 21 (VM_DISK_IO_READ)
        3. Simulate quota usage by inserting a row in the `cloud_usage` table
        4. Update the balance of the account by calling the API quotaUpdate
        5. Verify the balance of the account according to the tariff created
        """
        # Create quota tariff for the usage type 21 (VM_DISK_IO_READ)
        cmd = quotaTariffCreate.quotaTariffCreateCmd()
        cmd.name = 'Tariff'
        cmd.value = '10'
        cmd.usagetype = '21'
        self.tariffs.append(self.apiclient.quotaTariffCreate(cmd))

        # Add credits to the account
        cmd = quotaCredits.quotaCreditsCmd()
        cmd.account = self.account.name
        cmd.domainid = self.domain.id
        cmd.value = 100
        self.apiclient.quotaCredits(cmd)

        # Fetch account ID from account_uuid
        account_id_select = f"SELECT id FROM account WHERE uuid = '{self.account.id}';"
        self.debug(account_id_select)
        qresultset = self.dbclient.execute(account_id_select)
        account_id = qresultset[0][0]

        # Fetch domain ID from domain_uuid
        domain_id_select = f"SELECT id FROM `domain` d WHERE uuid = '{self.domain.id}';"
        self.debug(domain_id_select)
        qresultset = self.dbclient.execute(domain_id_select)
        domain_id = qresultset[0][0]

        # Fetch zone ID from zone_uuid
        zone_id_select = f"SELECT id from data_center dc where dc.uuid = '{self.zone.id}';"
        self.debug(zone_id_select)
        qresultset = self.dbclient.execute(zone_id_select)
        zone_id = qresultset[0][0]

        start_date = datetime.datetime.now() + datetime.timedelta(seconds=1)
        end_date = datetime.datetime.now() + datetime.timedelta(hours=1)

        # Manually insert a usage regarding the usage type 21 (VM_DISK_IO_READ)
        sql_query = (f"INSERT INTO cloud_usage.cloud_usage (zone_id,account_id,domain_id,description,usage_display,usage_type,raw_usage,vm_instance_id,vm_name,offering_id,template_id,"
                     f"usage_id,`type`,`size`,network_id,start_date,end_date,virtual_size,cpu_speed,cpu_cores,memory,quota_calculated,is_hidden,state)"
                     f" VALUES ('{zone_id}','{account_id}','{domain_id}','Test','1 Hrs',21,1,NULL,NULL,NULL,NULL,NULL,'VirtualMachine',NULL,NULL,'{start_date}','{end_date}',NULL,NULL,NULL,NULL,0,0,NULL);")
        self.debug(sql_query)
        self.dbclient.execute(sql_query)

        # Update quota to calculate the balance of the account
        cmd = quotaUpdate.quotaUpdateCmd()
        self.apiclient.quotaUpdate(cmd)

        # Retrieve the quota balance of the account
        cmd = quotaBalance.quotaBalanceCmd()
        cmd.domainid = self.account.domainid
        cmd.account = self.account.name
        response = self.apiclient.quotaBalance(cmd)

        self.debug(f"The quota balance for the account {self.account.name} is {response.balance}.")
        # 100 credits - (1 raw usage unit * tariff value 10) = 90
        self.assertEqual(response.balance.startquota, 90, f"The `startQuota` response field is supposed to be 90 but was {response.balance.startquota}.")
        return

View File

@ -1136,6 +1136,7 @@
"label.ipv6.subnets": "IPv6 Subnets",
"label.ip.addresses": "IP Addresses",
"label.iqn": "Target IQN",
"label.is.base64.encoded": "Base64 encoded",
"label.is.in.progress": "is in progress",
"label.is.shared": "Is shared",
"label.is2faenabled": "Is 2FA enabled",

View File

@ -142,6 +142,7 @@ export default {
subItem.key = subItem.tag ? (subItem.resourcetype + '-' + subItem.tag) : subItem.resourcetype
form[subItem.key] = subItem.max || -1
})
form[item.resourcetype] = item.max == null ? -1 : item.max
})
this.form = form
this.formRef.value.resetFields()

View File

@ -101,7 +101,25 @@ export default {
label: 'label.edit',
dataView: true,
popup: true,
args: ['description', 'clientid', 'redirecturi', 'secretkey', 'enabled']
args: ['description', 'clientid', 'redirecturi', 'secretkey']
},
{
api: 'updateOauthProvider',
icon: 'play-circle-outlined',
label: 'label.enable.provider',
message: 'message.confirm.enable.provider',
dataView: true,
defaultArgs: { enabled: true },
show: (record) => { return record.enabled === false }
},
{
api: 'updateOauthProvider',
icon: 'pause-circle-outlined',
label: 'label.disable.provider',
message: 'message.confirm.disable.provider',
dataView: true,
defaultArgs: { enabled: false },
show: (record) => { return record.enabled === true }
},
{
api: 'deleteOauthProvider',

View File

@ -97,21 +97,10 @@ export default {
},
{
api: 'updateImageStore',
icon: 'stop-outlined',
label: 'label.action.image.store.read.only',
message: 'message.action.secondary.storage.read.only',
icon: 'edit-outlined',
label: 'label.edit',
dataView: true,
defaultArgs: { readonly: true },
show: (record) => { return record.readonly === false }
},
{
api: 'updateImageStore',
icon: 'check-circle-outlined',
label: 'label.action.image.store.read.write',
message: 'message.action.secondary.storage.read.write',
dataView: true,
defaultArgs: { readonly: false },
show: (record) => { return record.readonly === true }
args: ['name', 'readonly', 'capacitybytes']
},
{
api: 'deleteImageStore',

View File

@ -492,6 +492,15 @@ export const fileSizeUtilPlugin = {
}
}
function isBase64 (str) {
try {
const decoded = new TextDecoder().decode(Uint8Array.from(atob(str), c => c.charCodeAt(0)))
return btoa(decoded) === str
} catch (err) {
return false
}
}
export const genericUtilPlugin = {
install (app) {
app.config.globalProperties.$isValidUuid = function (uuid) {
@ -500,8 +509,7 @@ export const genericUtilPlugin = {
}
app.config.globalProperties.$toBase64AndURIEncoded = function (text) {
const base64regex = /^([0-9a-zA-Z+/]{4})*(([0-9a-zA-Z+/]{2}==)|([0-9a-zA-Z+/]{3}=))?$/
if (base64regex.test(text)) {
if (isBase64(text)) {
return text
}
return encodeURIComponent(btoa(unescape(encodeURIComponent(text))))

View File

@ -705,7 +705,6 @@ export default {
},
getOkProps () {
if (this.selectedRowKeys.length > 0 && this.currentAction?.groupAction) {
return { props: { type: 'default' } }
} else {
return { props: { type: 'primary' } }
}

View File

@ -43,6 +43,9 @@
v-model:value="form.userdata"
:placeholder="apiParams.userdata.description"/>
</a-form-item>
<a-form-item name="isbase64" ref="isbase64" :label="$t('label.is.base64.encoded')">
<a-checkbox v-model:checked="form.isbase64"></a-checkbox>
</a-form-item>
<a-form-item name="params" ref="params">
<template #label>
<tooltip-label :title="$t('label.userdataparams')" :tooltip="apiParams.params.description"/>
@ -147,7 +150,9 @@ export default {
methods: {
initForm () {
this.formRef = ref()
this.form = reactive({})
this.form = reactive({
isbase64: false
})
this.rules = reactive({
name: [{ required: true, message: this.$t('message.error.name') }],
userdata: [{ required: true, message: this.$t('message.error.userdata') }]
@ -204,7 +209,7 @@ export default {
if (this.isValidValueForKey(values, 'account') && values.account.length > 0) {
params.account = values.account
}
params.userdata = this.$toBase64AndURIEncoded(values.userdata)
params.userdata = values.isbase64 ? values.userdata : this.$toBase64AndURIEncoded(values.userdata)
if (values.params != null && values.params.length > 0) {
var userdataparams = values.params.join(',')

View File

@ -429,9 +429,14 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else {
endDate = cal.getTime().getTime(); // current time
cal.add(Calendar.MINUTE, -1 * _aggregationDuration);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.add(Calendar.MINUTE, _aggregationDuration);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
}
parse(job, startDate, endDate);

View File

@ -372,12 +372,19 @@ public class Script implements Callable<String> {
//process completed successfully
if (_process.exitValue() == 0 || _process.exitValue() == exitValue) {
_logger.debug("Execution is successful.");
String result;
String method;
if (interpreter != null) {
return interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
_logger.debug("interpreting the result...");
method = "result interpretation of execution: ";
result= interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
} else {
// null return exitValue apparently
return String.valueOf(_process.exitValue());
method = "return code of execution: ";
result = String.valueOf(_process.exitValue());
}
_logger.debug(method + result);
return result;
} else { //process failed
break;
}