diff --git a/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java b/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java index 5c3ee3f1032..cd04db802ca 100644 --- a/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java +++ b/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java @@ -29,7 +29,6 @@ public interface VpcVirtualNetworkApplianceService extends VirtualNetworkApplian /** * @param router * @param network - * @param isRedundant * @param params TODO * @return * @throws ConcurrentOperationException @@ -42,11 +41,30 @@ public interface VpcVirtualNetworkApplianceService extends VirtualNetworkApplian /** * @param router * @param network - * @param isRedundant * @return * @throws ConcurrentOperationException * @throws ResourceUnavailableException */ boolean removeVpcRouterFromGuestNetwork(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException; + + /** + * @param router + * @param network + * @return + * @throws ConcurrentOperationException + * @throws ResourceUnavailableException + */ + boolean stopKeepAlivedOnRouter(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException; + + + /** + * @param router + * @param network + * @return + * @throws ConcurrentOperationException + * @throws ResourceUnavailableException + */ + boolean startKeepAlivedOnRouter(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException; + } diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index c3609cfd8ee..77800d8955c 100644 --- a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import 
org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import com.cloud.exception.DiscoveryException; @@ -110,6 +111,8 @@ public interface StorageService { */ ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws DiscoveryException; + ImageStore updateImageStore(UpdateImageStoreCmd cmd); + ImageStore updateImageStoreStatus(Long id, Boolean readonly); void updateStorageCapabilities(Long poolId, boolean failOnChecks); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java index bcc438b957b..0e1631a46ba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java @@ -39,10 +39,17 @@ public class UpdateImageStoreCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID") private Long id; - @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store to read-only, " + - "hence not considering them during storage migration") + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = false, description = "The new name for the Image Store.") + private String name; + + @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = false, + description = "If set to true, it 
designates the corresponding image store to read-only, hence not considering them during storage migration") private Boolean readonly; + @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false, + description = "The number of bytes CloudStack can use on this image storage.\n\tNOTE: this will be overwritten by the StatsCollector as soon as there is a SSVM to query the storage.") + private Long capacityBytes; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -51,17 +58,25 @@ public class UpdateImageStoreCmd extends BaseCmd { return id; } + public String getName() { + return name; + } + public Boolean getReadonly() { return readonly; } + public Long getCapacityBytes() { + return capacityBytes; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @Override public void execute() { - ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly()); + ImageStore result = _storageService.updateImageStore(this); ImageStoreResponse storeResponse = null; if (result != null) { storeResponse = _responseGenerator.createImageStoreResponse(result); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java index 532963dbddc..ee44b6bc474 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java @@ -27,11 +27,11 @@ import com.google.gson.annotations.SerializedName; @EntityReference(value = ImageStore.class) public class ImageStoreResponse extends BaseResponseWithAnnotations { - @SerializedName("id") + @SerializedName(ApiConstants.ID) @Param(description = "the ID of the image store") 
private String id; - @SerializedName("zoneid") + @SerializedName(ApiConstants.ZONE_ID) @Param(description = "the Zone ID of the image store") private String zoneId; @@ -39,15 +39,15 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the Zone name of the image store") private String zoneName; - @SerializedName("name") + @SerializedName(ApiConstants.NAME) @Param(description = "the name of the image store") private String name; - @SerializedName("url") + @SerializedName(ApiConstants.URL) @Param(description = "the url of the image store") private String url; - @SerializedName("protocol") + @SerializedName(ApiConstants.PROTOCOL) @Param(description = "the protocol of the image store") private String protocol; @@ -55,11 +55,11 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the provider name of the image store") private String providerName; - @SerializedName("scope") + @SerializedName(ApiConstants.SCOPE) @Param(description = "the scope of the image store") private ScopeType scope; - @SerializedName("readonly") + @SerializedName(ApiConstants.READ_ONLY) @Param(description = "defines if store is read-only") private Boolean readonly; diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java index ebe5e9a7ec9..e435c838b7d 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java @@ -81,4 +81,5 @@ public class VRScripts { public static final String VR_UPDATE_INTERFACE_CONFIG = "update_interface_config.sh"; public static final String ROUTER_FILESYSTEM_WRITABLE_CHECK = "filesystem_writable_check.py"; + public static final String MANAGE_SERVICE = "manage_service.sh"; } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java 
b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 3c86b3a0dcc..4afac9b43cb 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -34,6 +34,7 @@ import java.util.concurrent.locks.ReentrantLock; import javax.naming.ConfigurationException; +import org.apache.cloudstack.agent.routing.ManageServiceCommand; import com.cloud.agent.api.routing.UpdateNetworkCommand; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.network.router.VirtualRouter; @@ -144,6 +145,10 @@ public class VirtualRoutingResource { return execute((UpdateNetworkCommand) cmd); } + if (cmd instanceof ManageServiceCommand) { + return execute((ManageServiceCommand) cmd); + } + if (_vrAggregateCommandsSet.containsKey(routerName)) { _vrAggregateCommandsSet.get(routerName).add(cmd); aggregated = true; @@ -271,6 +276,20 @@ public class VirtualRoutingResource { return new Answer(cmd, new CloudRuntimeException("Failed to update interface mtu")); } + private Answer execute(ManageServiceCommand cmd) { + String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); + String args = cmd.getAction() + " " + cmd.getServiceName(); + ExecutionResult result = _vrDeployer.executeInVR(routerIp, VRScripts.MANAGE_SERVICE, args); + if (result.isSuccess()) { + return new Answer(cmd, true, + String.format("Successfully executed action: %s on service: %s. Details: %s", + cmd.getAction(), cmd.getServiceName(), result.getDetails())); + } else { + return new Answer(cmd, false, String.format("Failed to execute action: %s on service: %s. 
Details: %s", + cmd.getAction(), cmd.getServiceName(), result.getDetails())); + } + } + private ExecutionResult applyConfigToVR(String routerAccessIp, ConfigItem c) { return applyConfigToVR(routerAccessIp, c, VRScripts.VR_SCRIPT_EXEC_TIMEOUT); } diff --git a/core/src/main/java/org/apache/cloudstack/agent/routing/ManageServiceCommand.java b/core/src/main/java/org/apache/cloudstack/agent/routing/ManageServiceCommand.java new file mode 100644 index 00000000000..c83a5b69574 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/agent/routing/ManageServiceCommand.java @@ -0,0 +1,49 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.agent.routing; + +import com.cloud.agent.api.routing.NetworkElementCommand; + +public class ManageServiceCommand extends NetworkElementCommand { + + String serviceName; + String action; + + @Override + public boolean executeInSequence() { + return true; + } + + protected ManageServiceCommand() { + } + + public ManageServiceCommand(String serviceName, String action) { + this.serviceName = serviceName; + this.action = action; + } + + public String getServiceName() { + return serviceName; + } + + public String getAction() { + return action; + } +} diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java index 7c4d56e12b9..682473ec94f 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java @@ -121,4 +121,6 @@ public interface VolumeService { Pair checkAndRepairVolume(VolumeInfo volume); void checkAndRepairVolumeBasedOnConfig(DataObject dataObject, Host host); + + void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId); } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index daeb4b19a18..6653bb2bad3 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -368,6 +368,8 @@ public interface StorageManager extends StorageService { Long getDiskIopsWriteRate(ServiceOffering offering, DiskOffering diskOffering); + ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes); + void cleanupDownloadUrls(); void setDiskProfileThrottling(DiskProfile 
dskCh, ServiceOffering offering, DiskOffering diskOffering); diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java index 51362cf885e..6b53e49764e 100644 --- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java @@ -31,4 +31,6 @@ public interface DomainDetailsDao extends GenericDao { void deleteDetails(long domainId); void update(long domainId, Map details); + + String getActualValue(DomainDetailVO domainDetailVO); } diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java index dad3fe9ad1e..50097d154f5 100644 --- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java @@ -24,6 +24,7 @@ import javax.inject.Inject; import com.cloud.domain.DomainDetailVO; import com.cloud.domain.DomainVO; +import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; @@ -34,6 +35,7 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; public class DomainDetailsDaoImpl extends GenericDaoBase implements DomainDetailsDao, ScopedConfigStorage { protected final SearchBuilder domainSearch; @@ -111,7 +113,7 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i String enableDomainSettingsForChildDomain = _configDao.getValue("enable.domain.settings.for.child.domain"); if 
(!Boolean.parseBoolean(enableDomainSettingsForChildDomain)) { vo = findDetail(id, key.key()); - return vo == null ? null : vo.getValue(); + return vo == null ? null : getActualValue(vo); } DomainVO domain = _domainDao.findById(id); // if value is not configured in domain then check its parent domain till ROOT @@ -125,6 +127,15 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i break; } } - return vo == null ? null : vo.getValue(); + return vo == null ? null : getActualValue(vo); + } + + @Override + public String getActualValue(DomainDetailVO domainDetailVO) { + ConfigurationVO configurationVO = _configDao.findByName(domainDetailVO.getName()); + if (configurationVO != null && configurationVO.isEncrypted()) { + return DBEncryptionUtil.decrypt(domainDetailVO.getValue()); + } + return domainDetailVO.getValue(); } } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java index d086ad1dac1..48e63d8e2b5 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java @@ -54,7 +54,7 @@ public interface ServiceOfferingDao extends GenericDao List listPublicByCpuAndMemory(Integer cpus, Integer memory); - ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId); - List listByHostTag(String tag); + + ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved); } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 34ac7c47521..706dcdc1b7b 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -282,10 +282,10 @@ public class 
ServiceOfferingDaoImpl extends GenericDaoBase sc = SearchComputeOfferingByComputeOnlyDiskOffering.create(); sc.setParameters("disk_offering_id", diskOfferingId); - List vos = listBy(sc); + List vos = includingRemoved ? listIncludingRemovedBy(sc) : listBy(sc); if (vos.size() == 0) { return null; } diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java index f4534ee41ee..514433e8068 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java @@ -34,4 +34,6 @@ public interface AccountDetailsDao extends GenericDao { * they will get created */ void update(long accountId, Map details); + + String getActualValue(AccountDetailVO accountDetailVO); } diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java index 5451192fc6d..de562e27f9e 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java @@ -23,6 +23,7 @@ import java.util.Optional; import javax.inject.Inject; +import com.cloud.utils.crypt.DBEncryptionUtil; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; @@ -40,6 +41,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; public class AccountDetailsDaoImpl extends GenericDaoBase implements AccountDetailsDao, ScopedConfigStorage { protected final SearchBuilder accountSearch; @@ -119,7 +121,7 @@ public class AccountDetailsDaoImpl extends 
GenericDaoBase public String getConfigValue(long id, ConfigKey key) { // check if account level setting is configured AccountDetailVO vo = findDetail(id, key.key()); - String value = vo == null ? null : vo.getValue(); + String value = vo == null ? null : getActualValue(vo); if (value != null) { return value; } @@ -140,7 +142,7 @@ public class AccountDetailsDaoImpl extends GenericDaoBase while (domain != null) { DomainDetailVO domainVO = _domainDetailsDao.findDetail(domain.getId(), key.key()); if (domainVO != null) { - value = domainVO.getValue(); + value = _domainDetailsDao.getActualValue(domainVO); break; } else if (domain.getParent() != null) { domain = _domainDao.findById(domain.getParent()); @@ -152,4 +154,13 @@ public class AccountDetailsDaoImpl extends GenericDaoBase } return value; } + + @Override + public String getActualValue(AccountDetailVO accountDetailVO) { + ConfigurationVO configurationVO = _configDao.findByName(accountDetailVO.getName()); + if (configurationVO != null && configurationVO.isEncrypted()) { + return DBEncryptionUtil.decrypt(accountDetailVO.getValue()); + } + return accountDetailVO.getValue(); + } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 24a0db0b74a..136134698a9 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -32,7 +32,6 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; -import com.cloud.storage.VolumeApiServiceImpl; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; @@ -105,6 +104,7 @@ import 
com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.dao.ClusterDao; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageAccessException; import com.cloud.host.Host; @@ -118,6 +118,7 @@ import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceState; import com.cloud.server.ManagementService; import com.cloud.storage.CheckAndRepairVolumePayload; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.DataStoreRole; import com.cloud.storage.RegisterVolumePayload; import com.cloud.storage.ScopeType; @@ -130,6 +131,7 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeApiServiceImpl; import com.cloud.storage.Volume.State; import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; @@ -215,7 +217,7 @@ public class VolumeServiceImpl implements VolumeService { @Inject private PassphraseDao passphraseDao; @Inject - private DiskOfferingDao diskOfferingDao; + protected DiskOfferingDao diskOfferingDao; public VolumeServiceImpl() { } @@ -290,12 +292,12 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture createVolumeAsync(VolumeInfo volume, DataStore dataStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); DataObject volumeOnStore = dataStore.create(volume); volumeOnStore.processEvent(Event.CreateOnlyRequested); try { - CreateVolumeContext context = new CreateVolumeContext(null, volumeOnStore, future); + CreateVolumeContext context = new CreateVolumeContext<>(null, volumeOnStore, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); 
caller.setCallback(caller.getTarget().createVolumeCallback(null, null)).setContext(context); @@ -371,7 +373,7 @@ public class VolumeServiceImpl implements VolumeService { @DB @Override public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult result = new VolumeApiResult(volume); if (volume.getDataStore() == null) { logger.info("Expunge volume with no data store specified"); @@ -427,7 +429,7 @@ public class VolumeServiceImpl implements VolumeService { volume.processEvent(Event.ExpungeRequested); } - DeleteVolumeContext context = new DeleteVolumeContext(null, vo, future); + DeleteVolumeContext context = new DeleteVolumeContext<>(null, vo, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)).setContext(context); @@ -636,7 +638,7 @@ public class VolumeServiceImpl implements VolumeService { } } long templatePoolRefId = templatePoolRef.getId(); - CreateBaseImageContext context = new CreateBaseImageContext(null, volume, dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId); + CreateBaseImageContext context = new CreateBaseImageContext<>(null, volume, dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyBaseImageCallback(null, null)).setContext(context); @@ -806,7 +808,7 @@ public class VolumeServiceImpl implements VolumeService { DataObject volumeOnPrimaryStorage = pd.create(volume, volume.getDeployAsIsConfiguration()); volumeOnPrimaryStorage.processEvent(Event.CreateOnlyRequested); - CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, volumeOnPrimaryStorage, pd, templateOnPrimaryStore, future, null, volume.getDeployAsIsConfiguration()); + 
CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext<>(null, volumeOnPrimaryStorage, pd, templateOnPrimaryStore, future, null, volume.getDeployAsIsConfiguration()); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallBack(null, null)); caller.setContext(context); @@ -1174,7 +1176,7 @@ public class VolumeServiceImpl implements VolumeService { // Refresh the volume info from the DB. volumeInfo = volFactory.getVolume(volumeInfo.getId(), destPrimaryDataStore); - Map details = new HashMap(); + Map details = new HashMap<>(); details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); @@ -1278,12 +1280,12 @@ public class VolumeServiceImpl implements VolumeService { // Refresh the volume info from the DB. volumeInfo = volFactory.getVolume(volumeInfo.getId(), primaryDataStore); - ManagedCreateBaseImageContext context = new ManagedCreateBaseImageContext(null, volumeInfo, primaryDataStore, srcTemplateInfo, future); + ManagedCreateBaseImageContext context = new ManagedCreateBaseImageContext<>(null, volumeInfo, primaryDataStore, srcTemplateInfo, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().managedCopyBaseImageCallback(null, null)).setContext(context); - Map details = new HashMap(); + Map details = new HashMap<>(); details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); details.put(PrimaryDataStore.STORAGE_HOST, primaryDataStore.getHostAddress()); @@ -1639,14 +1641,14 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture createVolumeFromSnapshot(VolumeInfo volume, DataStore store, SnapshotInfo snapshot) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture 
future = new AsyncCallFuture<>(); try { DataObject volumeOnStore = store.create(volume); volumeOnStore.processEvent(Event.CreateOnlyRequested); _volumeDetailsDao.addDetail(volume.getId(), SNAPSHOT_ID, Long.toString(snapshot.getId()), false); - CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, volume, store, volumeOnStore, future, snapshot, null); + CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext<>(null, volume, store, volumeOnStore, future, snapshot, null); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createVolumeFromSnapshotCallback(null, null)).setContext(context); motionSrv.copyAsync(snapshot, volumeOnStore, caller); @@ -1733,7 +1735,7 @@ public class VolumeServiceImpl implements VolumeService { } protected AsyncCallFuture copyVolumeFromImageToPrimary(VolumeInfo srcVolume, DataStore destStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new VolumeApiResult(srcVolume); VolumeInfo destVolume = null; try { @@ -1741,7 +1743,7 @@ public class VolumeServiceImpl implements VolumeService { destVolume.processEvent(Event.CopyingRequested); srcVolume.processEvent(Event.CopyingRequested); - CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, destVolume, destStore); + CopyVolumeContext context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyVolumeFromImageToPrimaryCallback(null, null)).setContext(context); @@ -1787,7 +1789,7 @@ public class VolumeServiceImpl implements VolumeService { } protected AsyncCallFuture copyVolumeFromPrimaryToImage(VolumeInfo srcVolume, DataStore destStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res 
= new VolumeApiResult(srcVolume); VolumeInfo destVolume = null; try { @@ -1795,7 +1797,7 @@ public class VolumeServiceImpl implements VolumeService { srcVolume.processEvent(Event.MigrationRequested); // this is just used for locking that src volume record in DB to avoid using lock destVolume.processEventOnly(Event.CreateOnlyRequested); - CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, destVolume, destStore); + CopyVolumeContext context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyVolumeFromPrimaryToImageCallback(null, null)).setContext(context); @@ -1868,7 +1870,7 @@ public class VolumeServiceImpl implements VolumeService { // OfflineVmwareMigration: aren't we missing secondary to secondary in this logic? - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { @@ -1884,7 +1886,7 @@ public class VolumeServiceImpl implements VolumeService { destVolume.processEvent(Event.MigrationCopyRequested); srcVolume.processEvent(Event.MigrationRequested); - CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, destVolume, destStore); + CopyVolumeContext context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcVolume, destVolume, caller); @@ -2018,7 +2020,7 @@ public class VolumeServiceImpl implements VolumeService { } private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, DataStore destStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new 
VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { @@ -2035,7 +2037,7 @@ public class VolumeServiceImpl implements VolumeService { return future; } - List poolIds = new ArrayList(); + List poolIds = new ArrayList<>(); poolIds.add(srcVolume.getPoolId()); poolIds.add(destStore.getId()); @@ -2067,7 +2069,7 @@ public class VolumeServiceImpl implements VolumeService { PrimaryDataStore srcPrimaryDataStore = (PrimaryDataStore) srcVolume.getDataStore(); if (srcPrimaryDataStore.isManaged()) { - Map srcPrimaryDataStoreDetails = new HashMap(); + Map srcPrimaryDataStoreDetails = new HashMap<>(); srcPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); srcPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_HOST, srcPrimaryDataStore.getHostAddress()); srcPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(srcPrimaryDataStore.getPort())); @@ -2080,7 +2082,7 @@ public class VolumeServiceImpl implements VolumeService { } PrimaryDataStore destPrimaryDataStore = (PrimaryDataStore) destStore; - Map destPrimaryDataStoreDetails = new HashMap(); + Map destPrimaryDataStoreDetails = new HashMap<>(); destPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); destPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); destPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); @@ -2095,7 +2097,7 @@ public class VolumeServiceImpl implements VolumeService { destVolume.processEvent(Event.CreateRequested); srcVolume.processEvent(Event.MigrationRequested); - CopyManagedVolumeContext context = new CopyManagedVolumeContext(null, future, srcVolume, destVolume, hostWithPoolsAccess); + CopyManagedVolumeContext context = new CopyManagedVolumeContext<>(null, future, srcVolume, destVolume, hostWithPoolsAccess); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); 
caller.setCallback(caller.getTarget().copyManagedVolumeCallBack(null, null)).setContext(context); @@ -2233,7 +2235,7 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture migrateVolume(VolumeInfo srcVolume, DataStore destStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { @@ -2245,7 +2247,7 @@ public class VolumeServiceImpl implements VolumeService { VolumeInfo destVolume = volFactory.getVolume(srcVolume.getId(), destStore); srcVolume.processEvent(Event.MigrationRequested); - MigrateVolumeContext context = new MigrateVolumeContext(null, future, srcVolume, destVolume, destStore); + MigrateVolumeContext context = new MigrateVolumeContext<>(null, future, srcVolume, destVolume, destStore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().migrateVolumeCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcVolume, destVolume, caller); @@ -2298,13 +2300,13 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture migrateVolumes(Map volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); CommandResult res = new CommandResult(); try { // Check to make sure there are no snapshot operations on a volume // and // put it in the migrating state. 
- List volumesMigrating = new ArrayList(); + List volumesMigrating = new ArrayList<>(); for (Map.Entry entry : volumeMap.entrySet()) { VolumeInfo volume = entry.getKey(); if (!snapshotMgr.canOperateOnVolume(volume)) { @@ -2324,7 +2326,7 @@ public class VolumeServiceImpl implements VolumeService { } } - MigrateVmWithVolumesContext context = new MigrateVmWithVolumesContext(null, future, volumeMap); + MigrateVmWithVolumesContext context = new MigrateVmWithVolumesContext<>(null, future, volumeMap); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().migrateVmWithVolumesCallBack(null, null)).setContext(context); motionSrv.copyAsync(volumeMap, vmTo, srcHost, destHost, caller); @@ -2371,13 +2373,13 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture registerVolume(VolumeInfo volume, DataStore store) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); DataObject volumeOnStore = store.create(volume); volumeOnStore.processEvent(Event.CreateOnlyRequested); try { - CreateVolumeContext context = new CreateVolumeContext(null, volumeOnStore, future); + CreateVolumeContext context = new CreateVolumeContext<>(null, volumeOnStore, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().registerVolumeCallback(null, null)); caller.setContext(context); @@ -2472,7 +2474,7 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture resize(VolumeInfo volume) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult result = new VolumeApiResult(volume); try { volume.processEvent(Event.ResizeRequested); @@ -2482,7 +2484,7 @@ public class VolumeServiceImpl implements VolumeService { future.complete(result); return future; } - CreateVolumeContext context = new CreateVolumeContext(null, 
volume, future); + CreateVolumeContext context = new CreateVolumeContext<>(null, volume, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().resizeVolumeCallback(caller, context)).setContext(context); @@ -2581,7 +2583,7 @@ public class VolumeServiceImpl implements VolumeService { // find all the db volumes including those with NULL url column to avoid accidentally deleting volumes on image store later. List dbVolumes = _volumeStoreDao.listByStoreId(storeId); - List toBeDownloaded = new ArrayList(dbVolumes); + List toBeDownloaded = new ArrayList<>(dbVolumes); for (VolumeDataStoreVO volumeStore : dbVolumes) { VolumeVO volume = volDao.findById(volumeStore.getVolumeId()); if (volume == null) { @@ -2797,6 +2799,16 @@ public class VolumeServiceImpl implements VolumeService { } } + @Override + public void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId) { + DiskOfferingVO existingDiskOffering = diskOfferingDao.findByIdIncludingRemoved(existingDiskOfferingId); + DiskOfferingVO newDiskOffering = diskOfferingDao.findById(newDiskOfferingId); + + if (existingDiskOffering.getEncrypt() != newDiskOffering.getEncrypt()) { + throw new InvalidParameterValueException("Cannot change the encryption type of a volume, please check the selected offering"); + } + } + @Override public Pair checkAndRepairVolume(VolumeInfo volume) { Long poolId = volume.getPoolId(); diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java index 3a7fcfb6338..c4241dfbc3a 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java @@ -19,6 +19,23 @@ package org.apache.cloudstack.storage.volume; +import 
com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; +import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.CheckAndRepairVolumePayload; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.utils.Pair; import java.util.ArrayList; import java.util.Arrays; @@ -39,21 +56,6 @@ import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; -import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; -import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; -import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.exception.StorageUnavailableException; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.storage.CheckAndRepairVolumePayload; -import com.cloud.storage.Storage; -import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePool; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.snapshot.SnapshotManager; -import com.cloud.utils.Pair; - import junit.framework.TestCase; @RunWith(MockitoJUnitRunner.class) @@ -92,6 +94,9 @@ public class VolumeServiceTest extends TestCase{ @Mock HostDao hostDaoMock; + @Mock + DiskOfferingDao diskOfferingDaoMock; + @Before public void setup(){ volumeServiceImplSpy = Mockito.spy(new VolumeServiceImpl()); @@ -100,6 +105,7 @@ public class VolumeServiceTest extends TestCase{ volumeServiceImplSpy.snapshotMgr = snapshotManagerMock; 
volumeServiceImplSpy._storageMgr = storageManagerMock; volumeServiceImplSpy._hostDao = hostDaoMock; + volumeServiceImplSpy.diskOfferingDao = diskOfferingDaoMock; } @Test(expected = InterruptedException.class) @@ -309,4 +315,40 @@ public class VolumeServiceTest extends TestCase{ Assert.assertEquals(null, result); } + + @Test + public void validateDiskOfferingCheckForEncryption1Test() { + prepareOfferingsForEncryptionValidation(1L, true); + prepareOfferingsForEncryptionValidation(2L, true); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test + public void validateDiskOfferingCheckForEncryption2Test() { + prepareOfferingsForEncryptionValidation(1L, false); + prepareOfferingsForEncryptionValidation(2L, false); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test (expected = InvalidParameterValueException.class) + public void validateDiskOfferingCheckForEncryptionFail1Test() { + prepareOfferingsForEncryptionValidation(1L, false); + prepareOfferingsForEncryptionValidation(2L, true); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test (expected = InvalidParameterValueException.class) + public void validateDiskOfferingCheckForEncryptionFail2Test() { + prepareOfferingsForEncryptionValidation(1L, true); + prepareOfferingsForEncryptionValidation(2L, false); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + private void prepareOfferingsForEncryptionValidation(long diskOfferingId, boolean encryption) { + DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); + + Mockito.when(diskOffering.getEncrypt()).thenReturn(encryption); + Mockito.when(diskOfferingDaoMock.findByIdIncludingRemoved(diskOfferingId)).thenReturn(diskOffering); + Mockito.when(diskOfferingDaoMock.findById(diskOfferingId)).thenReturn(diskOffering); + } } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java 
b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java index 9c15a47444a..ded35338aea 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.HashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.TimeZone; @@ -54,6 +55,7 @@ import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.math.NumberUtils; +import org.apache.commons.lang3.time.DateUtils; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVO; @@ -145,79 +147,81 @@ public class QuotaManagerImpl extends ManagerBase implements QuotaManager { return; } - QuotaUsageVO firstQuotaUsage = accountQuotaUsages.get(0); - Date startDate = firstQuotaUsage.getStartDate(); - Date endDate = firstQuotaUsage.getStartDate(); + Date startDate = accountQuotaUsages.get(0).getStartDate(); + Date endDate = accountQuotaUsages.get(0).getEndDate(); + Date lastQuotaUsageEndDate = accountQuotaUsages.get(accountQuotaUsages.size() - 1).getEndDate(); - logger.info("Processing quota balance for account [{}] between [{}] and [{}].", accountToString, - DateUtil.displayDateInTimezone(usageAggregationTimeZone, startDate), - DateUtil.displayDateInTimezone(usageAggregationTimeZone, accountQuotaUsages.get(accountQuotaUsages.size() - 1).getEndDate())); + LinkedHashSet> periods = accountQuotaUsages.stream() + .map(quotaUsageVO -> new Pair<>(quotaUsageVO.getStartDate(), quotaUsageVO.getEndDate())) + .collect(Collectors.toCollection(LinkedHashSet::new)); + + logger.info("Processing quota balance for account [{}] between [{}] and [{}].", accountToString, startDate, lastQuotaUsageEndDate); - 
BigDecimal aggregatedUsage = BigDecimal.ZERO; long accountId = accountVo.getAccountId(); long domainId = accountVo.getDomainId(); + BigDecimal accountBalance = retrieveBalanceForUsageCalculation(accountId, domainId, startDate, accountToString); - aggregatedUsage = getUsageValueAccordingToLastQuotaUsageEntryAndLastQuotaBalance(accountId, domainId, startDate, endDate, aggregatedUsage, accountToString); + for (Pair period : periods) { + startDate = period.first(); + endDate = period.second(); - for (QuotaUsageVO quotaUsage : accountQuotaUsages) { - Date quotaUsageStartDate = quotaUsage.getStartDate(); - Date quotaUsageEndDate = quotaUsage.getEndDate(); - BigDecimal quotaUsed = quotaUsage.getQuotaUsed(); - - if (quotaUsed.equals(BigDecimal.ZERO)) { - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, quotaUsageStartDate, quotaUsageEndDate, accountToString)); - continue; - } - - if (startDate.compareTo(quotaUsageStartDate) == 0) { - aggregatedUsage = aggregatedUsage.subtract(quotaUsed); - continue; - } - - _quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, aggregatedUsage, endDate)); - - aggregatedUsage = BigDecimal.ZERO; - startDate = quotaUsageStartDate; - endDate = quotaUsageEndDate; - - QuotaBalanceVO lastRealBalanceEntry = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, endDate); - Date lastBalanceDate = new Date(0); - - if (lastRealBalanceEntry != null) { - lastBalanceDate = lastRealBalanceEntry.getUpdatedOn(); - aggregatedUsage = aggregatedUsage.add(lastRealBalanceEntry.getCreditBalance()); - } - - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, lastBalanceDate, endDate, accountToString)); - aggregatedUsage = aggregatedUsage.subtract(quotaUsed); + accountBalance = calculateBalanceConsideringCreditsAddedAndQuotaUsed(accountBalance, accountQuotaUsages, accountId, domainId, startDate, endDate, accountToString); + _quotaBalanceDao.saveQuotaBalance(new 
QuotaBalanceVO(accountId, domainId, accountBalance, endDate)); } - - _quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, aggregatedUsage, endDate)); - saveQuotaAccount(accountId, aggregatedUsage, endDate); + saveQuotaAccount(accountId, accountBalance, endDate); } - protected BigDecimal getUsageValueAccordingToLastQuotaUsageEntryAndLastQuotaBalance(long accountId, long domainId, Date startDate, Date endDate, BigDecimal aggregatedUsage, - String accountToString) { + /** + * Calculates the balance for the given account considering the specified period. The balance is calculated as follows: + *
+     * <ul>
+     *     <li>The credits added in this period are added to the balance.</li>
+     *     <li>All quota consumed in this period are subtracted from the account balance.</li>
+     * </ul>
+ */ + protected BigDecimal calculateBalanceConsideringCreditsAddedAndQuotaUsed(BigDecimal accountBalance, List accountQuotaUsages, long accountId, long domainId, + Date startDate, Date endDate, String accountToString) { + accountBalance = accountBalance.add(aggregateCreditBetweenDates(accountId, domainId, startDate, endDate, accountToString)); + + for (QuotaUsageVO quotaUsageVO : accountQuotaUsages) { + if (DateUtils.isSameInstant(quotaUsageVO.getStartDate(), startDate)) { + accountBalance = accountBalance.subtract(quotaUsageVO.getQuotaUsed()); + } + } + return accountBalance; + } + + /** + * Retrieves the initial balance prior to the period of the quota processing. + *
+     * <ul>
+     *     <li>If it is the first time of processing for the account, the credits prior to the quota processing are added, and the first balance is persisted in the DB.</li>
+     *     <li>Otherwise, the last real balance of the account is retrieved.</li>
+     * </ul>
+ */ + protected BigDecimal retrieveBalanceForUsageCalculation(long accountId, long domainId, Date startDate, String accountToString) { + BigDecimal accountBalance = BigDecimal.ZERO; QuotaUsageVO lastQuotaUsage = _quotaUsageDao.findLastQuotaUsageEntry(accountId, domainId, startDate); if (lastQuotaUsage == null) { - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, new Date(0), startDate, accountToString)); - QuotaBalanceVO firstBalance = new QuotaBalanceVO(accountId, domainId, aggregatedUsage, startDate); + accountBalance = accountBalance.add(aggregateCreditBetweenDates(accountId, domainId, new Date(0), startDate, accountToString)); + QuotaBalanceVO firstBalance = new QuotaBalanceVO(accountId, domainId, accountBalance, startDate); logger.debug(String.format("Persisting the first quota balance [%s] for account [%s].", firstBalance, accountToString)); _quotaBalanceDao.saveQuotaBalance(firstBalance); } else { - QuotaBalanceVO lastRealBalance = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, endDate); + QuotaBalanceVO lastRealBalance = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, startDate); - if (lastRealBalance != null) { - aggregatedUsage = aggregatedUsage.add(lastRealBalance.getCreditBalance()); - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, lastRealBalance.getUpdatedOn(), endDate, accountToString)); + if (lastRealBalance == null) { + logger.warn("Account [{}] has quota usage entries, however it does not have a quota balance.", accountToString); } else { - logger.warn(String.format("Account [%s] has quota usage entries, however it does not have a quota balance.", accountToString)); + accountBalance = accountBalance.add(lastRealBalance.getCreditBalance()); } } - return aggregatedUsage; + return accountBalance; } protected void saveQuotaAccount(long accountId, BigDecimal aggregatedUsage, Date endDate) { diff --git 
a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java index f15f3f20001..a71ae26e670 100644 --- a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java +++ b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java @@ -43,7 +43,6 @@ import com.cloud.resource.ResourceManager; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -89,7 +88,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { Long clusterId = plan.getClusterId(); ServiceOffering offering = vmProfile.getServiceOffering(); List hostsCopy = null; - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); if (type == Host.Type.Storage) { return suitableHosts; @@ -107,7 +106,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { } if (hosts != null) { // retain all computing hosts, regardless of whether they support routing...it's random after all - hostsCopy = new ArrayList(hosts); + hostsCopy = new ArrayList<>(hosts); if (ObjectUtils.anyNotNull(offeringHostTag, templateTag)) { hostsCopy.retainAll(listHostsByTags(type, dcId, podId, clusterId, offeringHostTag, templateTag)); } else { @@ -124,14 +123,15 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { hostsCopy = ListUtils.union(hostsCopy, _hostDao.findHostsWithTagRuleThatMatchComputeOferringTags(offeringHostTag)); if (hostsCopy.isEmpty()) { - logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTag)); - throw new CloudRuntimeException(String.format("No 
suitable host found for vm [%s].", vmProfile)); + logger.info("No suitable host found for VM [{}] in {}.", vmProfile, hostTag); + return null; } - logger.debug("Random Allocator found " + hostsCopy.size() + " hosts"); - if (hostsCopy.size() == 0) { + logger.debug("Random Allocator found {} hosts", hostsCopy.size()); + if (hostsCopy.isEmpty()) { return suitableHosts; } + Collections.shuffle(hostsCopy); for (Host host : hostsCopy) { if (suitableHosts.size() == returnUpTo) { @@ -174,7 +174,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { if (logger.isDebugEnabled()) { logger.debug("Random Allocator found 0 hosts as given host list is empty"); } - return new ArrayList(); + return new ArrayList<>(); } return findSuitableHosts(vmProfile, plan, type, avoid, hosts, returnUpTo, considerReservedCapacity); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index b503610eb3c..6c901795fa2 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -782,7 +782,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes */ protected EnumMap examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd, EnumMap params) { - EnumMap paramsCopy = new EnumMap(params); + EnumMap paramsCopy = new EnumMap<>(params); HypervisorType hypervisor = cmd.getDestTO().getHypervisorType(); if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)) { DataStoreTO destDataStore = cmd.getDestTO().getDataStore(); @@ -2201,7 +2201,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes throw new Exception("Failed to find the newly create or relocated VM. 
vmName: " + vmInternalCSName); } } - if (deployAsIs) { + if (deployAsIs && !vmMo.hasSnapshot()) { logger.info("Mapping VM disks to spec disks and tearing down datadisks (if any)"); mapSpecDisksToClonedDisksAndTearDownDatadisks(vmMo, vmInternalCSName, specDisks); } diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java index 6d7123ebe8e..b65027d6a24 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java @@ -136,9 +136,9 @@ public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthMana public OauthProviderVO registerOauthProvider(RegisterOAuthProviderCmd cmd) { String description = cmd.getDescription(); String provider = cmd.getProvider(); - String clientId = cmd.getClientId(); - String redirectUri = cmd.getRedirectUri(); - String secretKey = cmd.getSecretKey(); + String clientId = StringUtils.trim(cmd.getClientId()); + String redirectUri = StringUtils.trim(cmd.getRedirectUri()); + String secretKey = StringUtils.trim(cmd.getSecretKey()); if (!isOAuthPluginEnabled()) { throw new CloudRuntimeException("OAuth is not enabled, please enable to register"); @@ -168,9 +168,9 @@ public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthMana public OauthProviderVO updateOauthProvider(UpdateOAuthProviderCmd cmd) { Long id = cmd.getId(); String description = cmd.getDescription(); - String clientId = cmd.getClientId(); - String redirectUri = cmd.getRedirectUri(); - String secretKey = cmd.getSecretKey(); + String clientId = StringUtils.trim(cmd.getClientId()); + String redirectUri = StringUtils.trim(cmd.getRedirectUri()); + String secretKey = StringUtils.trim(cmd.getSecretKey()); Boolean enabled = 
cmd.getEnabled(); OauthProviderVO providerVO = _oauthProviderDao.findById(id); diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index 36330d6685c..99ac2492e83 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -128,12 +128,10 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (type == Host.Type.Storage) { // FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not - return new ArrayList(); + return new ArrayList<>(); } - if (logger.isDebugEnabled()) { - logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); - } + logger.debug("Looking for hosts in zone [{}], pod [{}], cluster [{}]", dcId, podId, clusterId); String hostTagOnOffering = offering.getHostTag(); String hostTagOnTemplate = template.getTemplateTag(); @@ -142,8 +140,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { boolean hasSvcOfferingTag = hostTagOnOffering != null ? true : false; boolean hasTemplateTag = hostTagOnTemplate != null ? 
true : false; - List clusterHosts = new ArrayList(); - List hostsMatchingUefiTag = new ArrayList(); + List clusterHosts = new ArrayList<>(); + List hostsMatchingUefiTag = new ArrayList<>(); if(isVMDeployedWithUefi){ hostsMatchingUefiTag = _hostDao.listByHostCapability(type, clusterId, podId, dcId, Host.HOST_UEFI_ENABLE); if (logger.isDebugEnabled()) { @@ -159,8 +157,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (hostTagOnOffering == null && hostTagOnTemplate == null) { clusterHosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId); } else { - List hostsMatchingOfferingTag = new ArrayList(); - List hostsMatchingTemplateTag = new ArrayList(); + List hostsMatchingOfferingTag = new ArrayList<>(); + List hostsMatchingTemplateTag = new ArrayList<>(); if (hasSvcOfferingTag) { if (logger.isDebugEnabled()) { logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); @@ -205,7 +203,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (clusterHosts.isEmpty()) { - logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTagOnOffering)); + logger.error("No suitable host found for vm [{}] with tags [{}].", vmProfile, hostTagOnOffering); throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile)); } // add all hosts that we are not considering to the avoid list @@ -231,8 +229,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { ServiceOffering offering = vmProfile.getServiceOffering(); VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate(); Account account = vmProfile.getOwner(); - List suitableHosts = new ArrayList(); - List hostsCopy = new ArrayList(hosts); + List suitableHosts = new ArrayList<>(); + List hostsCopy = new ArrayList<>(hosts); if (type == Host.Type.Storage) { // FirstFitAllocator should be used for user VMs only since it 
won't care whether the host is capable of @@ -314,7 +312,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } long serviceOfferingId = offering.getId(); - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); ServiceOfferingDetailsVO offeringDetails = null; for (Host host : hosts) { @@ -383,15 +381,15 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } //now filter the given list of Hosts by this ordered list - Map hostMap = new HashMap(); + Map hostMap = new HashMap<>(); for (Host host : hosts) { hostMap.put(host.getId(), host); } - List matchingHostIds = new ArrayList(hostMap.keySet()); + List matchingHostIds = new ArrayList<>(hostMap.keySet()); hostIdsByFreeCapacity.retainAll(matchingHostIds); - List reorderedHosts = new ArrayList(); + List reorderedHosts = new ArrayList<>(); for(Long id: hostIdsByFreeCapacity){ reorderedHosts.add(hostMap.get(id)); } @@ -413,15 +411,15 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } //now filter the given list of Hosts by this ordered list - Map hostMap = new HashMap(); + Map hostMap = new HashMap<>(); for (Host host : hosts) { hostMap.put(host.getId(), host); } - List matchingHostIds = new ArrayList(hostMap.keySet()); + List matchingHostIds = new ArrayList<>(hostMap.keySet()); hostIdsByVmCount.retainAll(matchingHostIds); - List reorderedHosts = new ArrayList(); + List reorderedHosts = new ArrayList<>(); for (Long id : hostIdsByVmCount) { reorderedHosts.add(hostMap.get(id)); } @@ -444,11 +442,11 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { // Determine the guest OS category of the template String templateGuestOSCategory = getTemplateGuestOSCategory(template); - List prioritizedHosts = new ArrayList(); - List noHvmHosts = new ArrayList(); + List prioritizedHosts = new ArrayList<>(); + List noHvmHosts = new ArrayList<>(); // If a template requires HVM and a host doesn't 
support HVM, remove it from consideration - List hostsToCheck = new ArrayList(); + List hostsToCheck = new ArrayList<>(); if (template.isRequiresHvm()) { for (Host host : hosts) { if (hostSupportsHVM(host)) { @@ -468,8 +466,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } // If a host is tagged with the same guest OS category as the template, move it to a high priority list // If a host is tagged with a different guest OS category than the template, move it to a low priority list - List highPriorityHosts = new ArrayList(); - List lowPriorityHosts = new ArrayList(); + List highPriorityHosts = new ArrayList<>(); + List lowPriorityHosts = new ArrayList<>(); for (Host host : hostsToCheck) { String hostGuestOSCategory = getHostGuestOSCategory(host); if (hostGuestOSCategory == null) { @@ -502,7 +500,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { // if service offering is not GPU enabled then move all the GPU enabled hosts to the end of priority list. if (_serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString()) == null) { - List gpuEnabledHosts = new ArrayList(); + List gpuEnabledHosts = new ArrayList<>(); // Check for GPU enabled hosts. 
for (Host host : prioritizedHosts) { if (_resourceMgr.isHostGpuEnabled(host.getId())) { diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java index 286bef7d39a..51b45a2dc98 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java @@ -26,6 +26,7 @@ import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -73,7 +74,7 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { public List allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo) { List hosts = super.allocateTo(vm, plan, type, avoid, returnUpTo); - if (hosts != null && !hosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(hosts)) { return hosts; } diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index a30abada404..3269bb83419 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -1103,8 +1103,8 @@ public class ApiDBUtils { return null; } - public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId) { - ServiceOfferingVO off = s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId); + public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId, boolean includingRemoved) { + ServiceOfferingVO off = s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId, includingRemoved); return off; } public static DomainVO 
findDomainById(Long domainId) { diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 6ccf128d537..50b68b70374 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -3127,7 +3127,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Override public ListResponse searchForImageStores(ListImageStoresCmd cmd) { Pair, Integer> result = searchForImageStoresInternal(cmd); - ListResponse response = new ListResponse(); + ListResponse response = new ListResponse<>(); List poolResponses = ViewResponseHelper.createImageStoreResponse(result.first().toArray(new ImageStoreJoinVO[result.first().size()])); response.setResponses(poolResponses, result.second()); diff --git a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java index abd5eca3389..10a5f6ea73c 100644 --- a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java @@ -186,8 +186,8 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation 0) { DiskOffering computeOnlyDiskOffering = ApiDBUtils.findComputeOnlyDiskOfferingById(volume.getDiskOfferingId()); - if (computeOnlyDiskOffering != null) { - ServiceOffering serviceOffering = ApiDBUtils.findServiceOfferingByComputeOnlyDiskOffering(volume.getDiskOfferingId()); + ServiceOffering serviceOffering = getServiceOfferingForDiskOffering(volume, computeOnlyDiskOffering); + if (serviceOffering != null) { volResponse.setServiceOfferingId(String.valueOf(serviceOffering.getId())); volResponse.setServiceOfferingName(serviceOffering.getName()); volResponse.setServiceOfferingDisplayText(serviceOffering.getDisplayText()); @@ -281,6 +281,26 @@ public class VolumeJoinDaoImpl extends 
GenericDaoBaseWithTagInformation, Configurable { Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), displayStorage); logger.debug("Returning Deployment Destination: {}.", dest); return dest; } @@ -517,7 +517,7 @@ StateListener, Configurable { logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host.getUuid(), vm.getUuid()); return null; } - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); suitableHosts.add(host); Pair> potentialResources = findPotentialDeploymentResources( suitableHosts, suitableVolumeStoragePools, avoids, @@ -610,7 +610,7 @@ StateListener, Configurable { boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), displayStorage); logger.debug("Returning Deployment Destination: {}.", dest); return dest; @@ -625,7 +625,7 @@ StateListener, Configurable { List readyAndReusedVolumes = result.second(); if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); suitableHosts.add(host); Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), @@ -867,9 +867,9 @@ StateListener, Configurable { long vmAccountId = vm.getAccountId(); long vmDomainId = vm.getDomainId(); - List allPodsFromDedicatedID = new ArrayList(); - List allClustersFromDedicatedID = new ArrayList(); - List 
allHostsFromDedicatedID = new ArrayList(); + List allPodsFromDedicatedID = new ArrayList<>(); + List allClustersFromDedicatedID = new ArrayList<>(); + List allHostsFromDedicatedID = new ArrayList<>(); List domainGroupMappings = _affinityGroupDomainMapDao.listByDomain(vmDomainId); @@ -999,7 +999,7 @@ StateListener, Configurable { final PlannerResourceUsage hostResourceTypeFinal = hostResourceType; // reserve the host for required resourceType // let us lock the reservation entry before updating. - return Transaction.execute(new TransactionCallback() { + return Transaction.execute(new TransactionCallback<>() { @Override public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); @@ -1091,7 +1091,7 @@ StateListener, Configurable { final long id = reservationEntry.getId(); - return Transaction.execute(new TransactionCallback() { + return Transaction.execute(new TransactionCallback<>() { @Override public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); @@ -1307,7 +1307,7 @@ StateListener, Configurable { List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); // if found suitable hosts in this cluster, find suitable storage // pools for each volume of the VM - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); return dest; @@ -1453,7 +1453,7 @@ StateListener, Configurable { } } - return new Pair(requiresShared, requiresLocal); + return new Pair<>(requiresShared, requiresLocal); } protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools, @@ -1465,10 +1465,10 @@ StateListener, Configurable { boolean 
hostAffinityCheck = false; if (readyAndReusedVolumes == null) { - readyAndReusedVolumes = new ArrayList(); + readyAndReusedVolumes = new ArrayList<>(); } - Map storage = new HashMap(); - TreeSet volumesOrderBySizeDesc = new TreeSet(new Comparator() { + Map storage = new HashMap<>(); + TreeSet volumesOrderBySizeDesc = new TreeSet<>(new Comparator<>() { @Override public int compare(Volume v1, Volume v2) { if (v1.getSize() < v2.getSize()) @@ -1481,7 +1481,7 @@ StateListener, Configurable { boolean multipleVolume = volumesOrderBySizeDesc.size() > 1; boolean deployAsIs = isDeployAsIs(vm); for (Host potentialHost : suitableHosts) { - Map> volumeAllocationMap = new HashMap>(); + Map> volumeAllocationMap = new HashMap<>(); if (deployAsIs) { storage = new HashMap<>(); // Find the common suitable pools @@ -1553,7 +1553,7 @@ StateListener, Configurable { if (volumeAllocationMap.containsKey(potentialSPool)) requestVolumes = volumeAllocationMap.get(potentialSPool); else - requestVolumes = new ArrayList(); + requestVolumes = new ArrayList<>(); requestVolumes.add(vol); List> volumeDiskProfilePair = getVolumeDiskProfilePairs(requestVolumes); if (potentialHost.getHypervisorType() == HypervisorType.VMware) { @@ -1603,7 +1603,7 @@ StateListener, Configurable { logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM"); volumeAllocationMap.clear(); - return new Pair>(potentialHost, storage); + return new Pair<>(potentialHost, storage); } else { logger.debug("Adding host [{}] to the avoid set because: can access Pool [{}], has enough space [{}], affinity check [{}], fits planner [{}] usage [{}].", potentialHost.getUuid(), hostCanAccessPool, haveEnoughSpace, hostAffinityCheck, resourceUsageRequired.getClass().getSimpleName(), hostFitsPlannerUsage); @@ -1655,21 +1655,20 @@ StateListener, Configurable { } protected List findSuitableHosts(VirtualMachineProfile vmProfile, DeploymentPlan plan, 
ExcludeList avoid, int returnUpTo) { - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); for (HostAllocator allocator : _hostAllocators) { suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { break; } } - if (suitableHosts.isEmpty()) { - logger.debug("No suitable hosts found"); + if (CollectionUtils.isEmpty(suitableHosts)) { + logger.debug("No suitable hosts found."); + } else { + reorderHostsByPriority(plan.getHostPriorities(), suitableHosts); } - // re-order hosts by priority - reorderHostsByPriority(plan.getHostPriorities(), suitableHosts); - return suitableHosts; } @@ -1698,8 +1697,8 @@ StateListener, Configurable { protected Pair>, List> findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); - Map> suitableVolumeStoragePools = new HashMap>(); - List readyAndReusedVolumes = new ArrayList(); + Map> suitableVolumeStoragePools = new HashMap<>(); + List readyAndReusedVolumes = new ArrayList<>(); // There should be at least the ROOT volume of the VM in usable state if (volumesTobeCreated.isEmpty()) { @@ -1784,7 +1783,7 @@ StateListener, Configurable { } } - HashSet toRemove = new HashSet(); + HashSet toRemove = new HashSet<>(); for (List lsp : suitableVolumeStoragePools.values()) { for (StoragePool sp : lsp) { toRemove.add(sp.getId()); @@ -1800,7 +1799,7 @@ StateListener, Configurable { logger.debug("No suitable pools found"); } - return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); + return new Pair<>(suitableVolumeStoragePools, readyAndReusedVolumes); } private boolean tryToFindPotentialPoolsToAlocateVolume(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, @@ -1824,7 +1823,7 @@ 
StateListener, Configurable { Map> suitableVolumeStoragePools, List readyAndReusedVolumes, VolumeVO toBeCreated) { logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. Checking if this pool can be reused.", toBeCreated.getUuid(), vmProfile.getUuid(), toBeCreated.getPoolId()); - List suitablePools = new ArrayList(); + List suitablePools = new ArrayList<>(); StoragePool pool = null; if (toBeCreated.getPoolId() != null) { pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); @@ -1951,7 +1950,7 @@ StateListener, Configurable { final VirtualMachine vm = vmProfile.getVirtualMachine(); final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); - return Transaction.execute(new TransactionCallback() { + return Transaction.execute(new TransactionCallback<>() { @Override public String doInTransaction(TransactionStatus status) { boolean saveReservation = true; @@ -1977,7 +1976,7 @@ StateListener, Configurable { if (planner != null) { vmReservation.setDeploymentPlanner(planner.getName()); } - Map volumeReservationMap = new HashMap(); + Map volumeReservationMap = new HashMap<>(); if (vm.getHypervisorType() != HypervisorType.BareMetal) { for (Volume vo : plannedDestination.getStorageForDisks().keySet()) { diff --git a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java index 22b9a33f65b..46e6c369c33 100644 --- a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java @@ -138,7 +138,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla return null; } - List clusterList = new ArrayList(); + List clusterList = new ArrayList<>(); if (plan.getClusterId() != null) { Long clusterIdSpecified = plan.getClusterId(); logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified); @@ -209,7 +209,7 @@ public class FirstFitPlanner 
extends AdapterBase implements DeploymentClusterPla } private void reorderClustersBasedOnImplicitTags(List clusterList, int requiredCpu, long requiredRam) { - final HashMap UniqueTagsInClusterMap = new HashMap(); + final HashMap UniqueTagsInClusterMap = new HashMap<>(); Long uniqueTags; for (Long clusterId : clusterList) { uniqueTags = (long) 0; @@ -220,7 +220,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla } UniqueTagsInClusterMap.put(clusterId, uniqueTags); } - Collections.sort(clusterList, new Comparator() { + Collections.sort(clusterList, new Comparator<>() { @Override public int compare(Long o1, Long o2) { Long t1 = UniqueTagsInClusterMap.get(o1); @@ -249,7 +249,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla int requiredCpu = offering.getCpu() * offering.getSpeed(); long requiredRam = offering.getRamSize() * 1024L * 1024L; //list pods under this zone by cpu and ram capacity - List prioritizedPodIds = new ArrayList(); + List prioritizedPodIds; Pair, Map> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam); List podsWithCapacity = podCapacityInfo.first(); @@ -277,7 +277,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla return null; } - List clusterList = new ArrayList(); + List clusterList = new ArrayList<>(); //loop over pods for (Long podId : prioritizedPodIds) { logger.debug("Checking resources under Pod: " + podId); @@ -298,7 +298,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla private Map getCapacityThresholdMap() { // Lets build this real time so that the admin won't have to restart MS // if anyone changes these values - Map disableThresholdMap = new HashMap(); + Map disableThresholdMap = new HashMap<>(); String cpuDisableThresholdString = ClusterCPUCapacityDisableThreshold.value().toString(); float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 
0.85F); @@ -312,7 +312,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla } private List getCapacitiesForCheckingThreshold() { - List capacityList = new ArrayList(); + List capacityList = new ArrayList<>(); capacityList.add(Capacity.CAPACITY_TYPE_CPU); capacityList.add(Capacity.CAPACITY_TYPE_MEMORY); return capacityList; @@ -339,7 +339,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla } List capacityList = getCapacitiesForCheckingThreshold(); - List clustersCrossingThreshold = new ArrayList(); + List clustersCrossingThreshold = new ArrayList<>(); ServiceOffering offering = vmProfile.getServiceOffering(); int cpu_requested = offering.getCpu() * offering.getSpeed(); @@ -523,7 +523,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla matchingClusters.addAll(hostDao.findClustersThatMatchHostTagRule(hostTagOnOffering)); if (matchingClusters.isEmpty()) { - logger.error(String.format("No suitable host found for the following compute offering tags [%s].", hostTagOnOffering)); + logger.error("No suitable host found for the following compute offering tags [{}].", hostTagOnOffering); throw new CloudRuntimeException("No suitable host found."); } diff --git a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java index 0a1114b8307..841f6221182 100644 --- a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java @@ -149,7 +149,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc public boolean implementVpc(final Vpc vpc, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - final Map params = new HashMap(1); + final Map params = new 
HashMap<>(1); params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); if (vpc.isRollingRestart()) { @@ -194,7 +194,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc return false; } - final Map params = new HashMap(1); + final Map params = new HashMap<>(1); params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); if (network.isRollingRestart()) { @@ -221,24 +221,58 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc return true; } - protected void configureGuestNetwork(final Network network, final List routers ) + protected boolean configureGuestNetworkForRouter(final Network network, + final DomainRouterVO router) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { + if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) { + final Map paramsForRouter = new HashMap<>(1); + if (network.getState() == State.Setup) { + paramsForRouter.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); + } + if (!_vpcRouterMgr.addVpcRouterToGuestNetwork(router, network, paramsForRouter)) { + logger.error("Failed to add VPC router {} to guest network {}", router, network); + return false; + } else { + logger.debug("Successfully added VPC router {} to guest network {}", router, network); + return true; + } + } + return true; + } + + protected void configureGuestNetwork(final Network network, final List routers) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { - logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); + logger.info("Adding VPC routers to Guest Network: {} to be added!", routers.size()); - for (final DomainRouterVO router : routers) { + List backupRouters = new ArrayList<>(); + List remainingRouters = new ArrayList<>(); + for (DomainRouterVO router : routers) { if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) 
{ - final Map paramsForRouter = new HashMap(1); - if (network.getState() == State.Setup) { - paramsForRouter.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); - } - if (!_vpcRouterMgr.addVpcRouterToGuestNetwork(router, network, paramsForRouter)) { - logger.error("Failed to add VPC router " + router + " to guest network " + network); + if (router.getRedundantState().equals(DomainRouterVO.RedundantState.BACKUP)) { + backupRouters.add(router); } else { - logger.debug("Successfully added VPC router " + router + " to guest network " + network); + remainingRouters.add(router); } } } + + for (final DomainRouterVO router : backupRouters) { + if (network.getState() != State.Setup) { + if (!_vpcRouterMgr.stopKeepAlivedOnRouter(router, network)) { + logger.error("Failed to stop keepalived on VPC router {} to guest network {}", router, network); + } else { + logger.debug("Successfully stopped keepalived on VPC router {} to guest network {}", router, network); + } + } + } + for (final DomainRouterVO router : remainingRouters) { + configureGuestNetworkForRouter(network, router); + } + for (final DomainRouterVO router : backupRouters) { + if (!configureGuestNetworkForRouter(network, router) && !_vpcRouterMgr.startKeepAlivedOnRouter(router, network)) { + logger.error("Failed to start keepalived on VPC router {} to guest network {}", router, network); + } + } } @Override @@ -258,7 +292,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc } if (vm.getType() == VirtualMachine.Type.User) { - final Map params = new HashMap(1); + final Map params = new HashMap<>(1); params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); final RouterDeploymentDefinition routerDeploymentDefinition = routerDeploymentDefinitionBuilder.create() @@ -283,30 +317,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc @Override public boolean shutdown(final Network network, final ReservationContext context, final boolean 
cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - final Long vpcId = network.getVpcId(); - if (vpcId == null) { - logger.debug("Network " + network + " doesn't belong to any vpc, so skipping unplug nic part"); - return true; - } - - boolean success = true; - final List routers = _routerDao.listByVpcId(vpcId); - for (final VirtualRouter router : routers) { - // 1) Check if router is already a part of the network - if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) { - logger.debug("Router " + router + " is not a part the network " + network); - continue; - } - // 2) Call unplugNics in the network service - success = success && _vpcRouterMgr.removeVpcRouterFromGuestNetwork(router, network); - if (!success) { - logger.warn("Failed to unplug nic in network " + network + " for virtual router " + router); - } else { - logger.debug("Successfully unplugged nic in network " + network + " for virtual router " + router); - } - } - - return success; + return destroy(network, context); } @Override @@ -385,16 +396,16 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc } private static Map> setCapabilities() { - final Map> capabilities = new HashMap>(); + final Map> capabilities = new HashMap<>(); capabilities.putAll(VirtualRouterElement.capabilities); - final Map sourceNatCapabilities = new HashMap(); + final Map sourceNatCapabilities = new HashMap<>(); sourceNatCapabilities.putAll(capabilities.get(Service.SourceNat)); // TODO This kind of logic is already placed in the DB sourceNatCapabilities.put(Capability.RedundantRouter, "true"); capabilities.put(Service.SourceNat, sourceNatCapabilities); - final Map vpnCapabilities = new HashMap(); + final Map vpnCapabilities = new HashMap<>(); vpnCapabilities.putAll(capabilities.get(Service.Vpn)); vpnCapabilities.put(Capability.VpnTypes, "s2svpn"); capabilities.put(Service.Vpn, vpnCapabilities); @@ -667,7 +678,7 @@ public class VpcVirtualRouterElement extends 
VirtualRouterElement implements Vpc final NetworkTopology networkTopology = networkTopologyContext.retrieveNetworkTopology(dcVO); String[] result = null; - final List combinedResults = new ArrayList(); + final List combinedResults = new ArrayList<>(); for (final DomainRouterVO domainRouterVO : routers) { result = networkTopology.applyVpnUsers(vpn, users, domainRouterVO); combinedResults.addAll(Arrays.asList(result)); diff --git a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index f45386ca8a7..4156c85f880 100644 --- a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -30,6 +30,8 @@ import javax.naming.ConfigurationException; import com.cloud.network.dao.NetworkDao; import com.cloud.network.vpc.dao.VpcDao; +import org.apache.cloudstack.agent.routing.ManageServiceCommand; +import com.cloud.agent.api.routing.NetworkElementCommand; import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; @@ -231,6 +233,54 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian return result; } + @Override + public boolean stopKeepAlivedOnRouter(VirtualRouter router, + Network network) throws ConcurrentOperationException, ResourceUnavailableException { + return manageKeepalivedServiceOnRouter(router, network, "stop"); + } + + @Override + public boolean startKeepAlivedOnRouter(VirtualRouter router, + Network network) throws ConcurrentOperationException, ResourceUnavailableException { + return manageKeepalivedServiceOnRouter(router, network, "start"); + } + + private boolean manageKeepalivedServiceOnRouter(VirtualRouter router, + Network network, String action) throws ConcurrentOperationException, ResourceUnavailableException { + if 
(network.getTrafficType() != TrafficType.Guest) { + logger.warn("Network {} is not of type {}", network, TrafficType.Guest); + return false; + } + boolean result = true; + try { + if (router.getState() == State.Running) { + final ManageServiceCommand stopCommand = new ManageServiceCommand("keepalived", action); + stopCommand.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId())); + + final Commands cmds = new Commands(Command.OnError.Stop); + cmds.addCommand("manageKeepalived", stopCommand); + _nwHelper.sendCommandsToRouter(router, cmds); + + final Answer setupAnswer = cmds.getAnswer("manageKeepalived"); + if (!(setupAnswer != null && setupAnswer.getResult())) { + logger.warn("Unable to {} keepalived on router {}", action, router); + result = false; + } + } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { + logger.debug("Router {} is in {}, so not sending command to the backend", router.getInstanceName(), router.getState()); + } else { + String message = "Unable to " + action + " keepalived on virtual router [" + router + "] is not in the right state " + router.getState(); + logger.warn(message); + throw new ResourceUnavailableException(message, DataCenter.class, router.getDataCenterId()); + } + } catch (final Exception ex) { + logger.warn("Failed to {} keepalived on router {} to network {} due to {}", action, router, network, ex.getLocalizedMessage()); + logger.debug("Failed to {} keepalived on router {} to network {}", action, router, network, ex); + result = false; + } + return result; + } + protected boolean setupVpcGuestNetwork(final Network network, final VirtualRouter router, final boolean add, final NicProfile guestNic) throws ConcurrentOperationException, ResourceUnavailableException { diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 15a52d3f750..81ab2524bd6 100644 --- 
a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -844,10 +844,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe protected StateMachine2 _stateMachine; static final String FOR_SYSTEMVMS = "forsystemvms"; - static final ConfigKey vmPasswordLength = new ConfigKey("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false); - static final ConfigKey sshKeyLength = new ConfigKey("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global); - static final ConfigKey humanReadableSizes = new ConfigKey("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global); - public static final ConfigKey customCsIdentifier = new ConfigKey("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack installation", true, ConfigKey.Scope.Global); + static final ConfigKey vmPasswordLength = new ConfigKey<>("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false); + static final ConfigKey sshKeyLength = new ConfigKey<>("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global); + static final ConfigKey humanReadableSizes = new ConfigKey<>("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global); + public static final ConfigKey customCsIdentifier = new ConfigKey<>("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack 
installation", true, ConfigKey.Scope.Global); private static final VirtualMachine.Type []systemVmTypes = { VirtualMachine.Type.SecondaryStorageVm, VirtualMachine.Type.ConsoleProxy}; private static final List LIVE_MIGRATION_SUPPORTING_HYPERVISORS = List.of(HypervisorType.Hyperv, HypervisorType.KVM, HypervisorType.LXC, HypervisorType.Ovm, HypervisorType.Ovm3, HypervisorType.Simulator, HypervisorType.VMware, HypervisorType.XenServer); @@ -1034,7 +1034,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe protected List _planners; - private final List supportedHypervisors = new ArrayList(); + private final List supportedHypervisors = new ArrayList<>(); public List getPlanners() { return _planners; @@ -1112,7 +1112,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final String[] availableIds = TimeZone.getAvailableIDs(); - _availableIdsMap = new HashMap(availableIds.length); + _availableIdsMap = new HashMap<>(availableIds.length); for (final String id : availableIds) { _availableIdsMap.put(id, true); } @@ -1196,7 +1196,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final Account caller = getCaller(); final List ids = cmd.getIds(); boolean result = true; - List permittedAccountIds = new ArrayList(); + List permittedAccountIds = new ArrayList<>(); if (_accountService.isNormalUser(caller.getId()) || caller.getType() == Account.Type.PROJECT) { permittedAccountIds.add(caller.getId()); @@ -1211,8 +1211,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, false, sameOwnerEvents); if (ids != null && events.size() < ids.size()) { - result = false; - return result; + return false; } _eventDao.archiveEvents(events); return result; @@ -1223,7 +1222,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final Account caller = getCaller(); 
final List ids = cmd.getIds(); boolean result = true; - List permittedAccountIds = new ArrayList(); + List permittedAccountIds = new ArrayList<>(); if (_accountMgr.isNormalUser(caller.getId()) || caller.getType() == Account.Type.PROJECT) { permittedAccountIds.add(caller.getId()); @@ -1238,8 +1237,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, false, sameOwnerEvents); if (ids != null && events.size() < ids.size()) { - result = false; - return result; + return false; } for (final EventVO event : events) { _eventDao.remove(event.getId()); @@ -1322,7 +1320,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _clusterDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } private HypervisorType getHypervisorType(VMInstanceVO vm, StoragePool srcVolumePool) { @@ -1362,7 +1360,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final Pair, Integer> result = searchForServers(cmd.getStartIndex(), cmd.getPageSizeVal(), name, type, state, zoneId, pod, cluster, id, keyword, resourceState, haHosts, null, null); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } protected Pair> filterUefiHostsForMigration(List allHosts, List filteredHosts, VirtualMachine vm) { @@ -1596,20 +1594,17 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false); } - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { break; } } - // re-order hosts by priority _dpMgr.reorderHostsByPriority(plan.getHostPriorities(), suitableHosts); - 
if (logger.isDebugEnabled()) { - if (suitableHosts.isEmpty()) { - logger.debug("No suitable hosts found"); - } else { - logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts); - } + if (suitableHosts.isEmpty()) { + logger.warn("No suitable hosts found."); + } else { + logger.debug("Hosts having capacity and suitable for migration: {}", suitableHosts); } return new Ternary<>(otherHosts, suitableHosts, requiresStorageMotion); @@ -1660,9 +1655,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe StoragePool datastoreCluster = _poolDao.findById(srcVolumePool.getParent()); avoidPools.add(datastoreCluster); } - abstractDataStoreClustersList((List) allPools, new ArrayList()); + abstractDataStoreClustersList((List) allPools, new ArrayList<>()); abstractDataStoreClustersList((List) suitablePools, avoidPools); - return new Pair, List>(allPools, suitablePools); + return new Pair<>(allPools, suitablePools); } @Override @@ -1694,13 +1689,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } // Volume must be attached to an instance for live migration. - List allPools = new ArrayList(); - List suitablePools = new ArrayList(); + List allPools = new ArrayList<>(); + List suitablePools = new ArrayList<>(); // Volume must be in Ready state to be migrated. 
if (!Volume.State.Ready.equals(volume.getState())) { logger.info("Volume " + volume + " must be in ready state for migration."); - return new Pair, List>(allPools, suitablePools); + return new Pair<>(allPools, suitablePools); } final Long instanceId = volume.getInstanceId(); @@ -1736,7 +1731,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (!storageMotionSupported) { logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion."); - return new Pair, List>(allPools, suitablePools); + return new Pair<>(allPools, suitablePools); } } @@ -1759,7 +1754,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } removeDataStoreClusterParents((List) allPools); removeDataStoreClusterParents((List) suitablePools); - return new Pair, List>(allPools, suitablePools); + return new Pair<>(allPools, suitablePools); } private void removeDataStoreClusterParents(List storagePools) { @@ -2034,7 +2029,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2172,7 +2167,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _vlanDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2305,7 +2300,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (scope != null && !scope.isEmpty()) { // Populate values corresponding the resource id - final List configVOList = new ArrayList(); + final List configVOList = new ArrayList<>(); for (final ConfigurationVO param : result.first()) { final ConfigurationVO configVo = 
_configDao.findByName(param.getName()); if (configVo != null) { @@ -2327,10 +2322,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } - return new Pair, Integer>(configVOList, configVOList.size()); + return new Pair<>(configVOList, configVOList.size()); } - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2344,7 +2339,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _configGroupDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2556,7 +2551,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe Collections.sort(addrs, Comparator.comparing(IPAddressVO::getAddress)); List wPagination = com.cloud.utils.StringUtils.applyPagination(addrs, cmd.getStartIndex(), cmd.getPageSizeVal()); if (wPagination != null) { - return new Pair, Integer>(wPagination, addrs.size()); + return new Pair<>(wPagination, addrs.size()); } return new Pair<>(addrs, addrs.size()); } @@ -2728,7 +2723,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _guestOSCategoryDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2781,7 +2776,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _guestOSHypervisorDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -3147,7 +3142,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public Pair getVncPort(final VirtualMachine vm) { if 
(vm.getHostId() == null) { logger.warn("VM " + vm.getHostName() + " does not have host, return -1 for its VNC port"); - return new Pair(null, -1); + return new Pair<>(null, -1); } if (logger.isTraceEnabled()) { @@ -3161,10 +3156,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe answer = (GetVncPortAnswer)_agentMgr.easySend(vm.getHostId(), new GetVncPortCommand(vm.getId(), vm.getInstanceName())); } if (answer != null && answer.getResult()) { - return new Pair(answer.getAddress(), answer.getPort()); + return new Pair<>(answer.getAddress(), answer.getPort()); } - return new Pair(null, -1); + return new Pair<>(null, -1); } @Override @@ -3202,21 +3197,19 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe sc.addAnd("archived", SearchCriteria.Op.EQ, false); final Pair, Integer> result = _alertDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override public boolean archiveAlerts(final ArchiveAlertsCmd cmd) { final Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), null); - final boolean result = _alertDao.archiveAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId); - return result; + return _alertDao.archiveAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId); } @Override public boolean deleteAlerts(final DeleteAlertsCmd cmd) { final Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), null); - final boolean result = _alertDao.deleteAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId); - return result; + return _alertDao.deleteAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId); } Pair> getHostIdsForCapacityListing(Long zoneId, Long podId, Long clusterId, Integer capacityType, String tag) { @@ 
-3482,7 +3475,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public List> getCommands() { - final List> cmdList = new ArrayList>(); + final List> cmdList = new ArrayList<>(); cmdList.add(CreateAccountCmd.class); cmdList.add(DeleteAccountCmd.class); cmdList.add(DisableAccountCmd.class); @@ -4251,7 +4244,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _vmInstanceDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -4417,7 +4410,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe logger.warn("Exception whilst creating a signature:" + e); } - final ArrayList cloudParams = new ArrayList(); + final ArrayList cloudParams = new ArrayList<>(); cloudParams.add(cloudIdentifier); cloudParams.add(signature); @@ -4426,7 +4419,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public Map listCapabilities(final ListCapabilitiesCmd cmd) { - final Map capabilities = new HashMap(); + final Map capabilities = new HashMap<>(); final Account caller = getCaller(); boolean securityGroupsEnabled = false; @@ -4598,7 +4591,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public List getHypervisors(final Long zoneId) { - final List result = new ArrayList(); + final List result = new ArrayList<>(); final String hypers = _configDao.getValue(Config.HypervisorList.key()); final String[] hypervisors = hypers.split(","); @@ -4810,9 +4803,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final String keyword = cmd.getKeyword(); final Account caller = getCaller(); - final List permittedAccounts = new ArrayList(); + final List permittedAccounts = new ArrayList<>(); - final Ternary domainIdRecursiveListProject = 
new Ternary(cmd.getDomainId(), cmd.isRecursive(), null); + final Ternary domainIdRecursiveListProject = new Ternary<>(cmd.getDomainId(), cmd.isRecursive(), null); _accountMgr.buildACLSearchParameters(caller, null, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); final Long domainId = domainIdRecursiveListProject.first(); final Boolean isRecursive = domainIdRecursiveListProject.second(); @@ -5155,7 +5148,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _hypervisorCapabilitiesDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -5302,7 +5295,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public List listDeploymentPlanners() { - final List plannersAvailable = new ArrayList(); + final List plannersAvailable = new ArrayList<>(); for (final DeploymentPlanner planner : _planners) { plannersAvailable.add(planner.getName()); } diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 2c8b79500ba..86b8b0c682c 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1670,7 +1670,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } List stores = _dataStoreMgr.listImageStores(); - ConcurrentHashMap storageStats = new ConcurrentHashMap(); + ConcurrentHashMap storageStats = new ConcurrentHashMap<>(); for (DataStore store : stores) { if (store.getUri() == null) { continue; @@ -1690,7 +1690,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc logger.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total 
Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes())); } } - _storageStats = storageStats; + updateStorageStats(storageStats); ConcurrentHashMap storagePoolStats = new ConcurrentHashMap(); List storagePools = _storagePoolDao.listAll(); @@ -1740,6 +1740,19 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc logger.error("Error trying to retrieve storage stats", t); } } + + private void updateStorageStats(ConcurrentHashMap storageStats) { + for (Long storeId : storageStats.keySet()) { + if (_storageStats.containsKey(storeId) + && (_storageStats.get(storeId).getCapacityBytes() == 0l + || _storageStats.get(storeId).getCapacityBytes() != storageStats.get(storeId).getCapacityBytes())) { + // get add to DB rigorously + _storageManager.updateImageStoreStatus(storeId, null, null, storageStats.get(storeId).getCapacityBytes()); + } + } + // if in _storageStats and not in storageStats it gets discarded + _storageStats = storageStats; + } } class AutoScaleMonitor extends ManagedContextRunnable { diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index f3f0c5dc7e4..a25dadacf44 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -62,6 +62,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd; import 
org.apache.cloudstack.api.command.admin.storage.heuristics.RemoveSecondaryStorageSelectorCmd; @@ -138,7 +139,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.time.DateUtils; import org.apache.commons.lang3.EnumUtils; -import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -234,6 +234,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; +import com.cloud.utils.StringUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -407,7 +408,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C int _downloadUrlExpirationInterval; private long _serverId; - private final Map hostListeners = new HashMap(); + private final Map hostListeners = new HashMap<>(); public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException { @@ -465,7 +466,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public List ListByDataCenterHypervisor(long datacenterId, HypervisorType type) { List pools = _storagePoolDao.listByDataCenterId(datacenterId); - List retPools = new ArrayList(); + List retPools = new ArrayList<>(); for (StoragePoolVO pool : pools) { if (pool.getStatus() != StoragePoolStatus.Up) { continue; @@ -571,7 +572,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver) storeDriver; - HashMap statEntry = new HashMap(); + HashMap 
statEntry = new HashMap<>(); GetVolumeStatsCommand getVolumeStatsCommand = (GetVolumeStatsCommand) cmd; for (String volumeUuid : getVolumeStatsCommand.getVolumeUuids()) { Pair volumeStats = primaryStoreDriver.getVolumeStats(pool, volumeUuid); @@ -793,7 +794,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStoreProvider provider = _dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); if (pool == null) { - Map params = new HashMap(); + Map params = new HashMap<>(); String name = pInfo.getName() != null ? pInfo.getName() : createLocalStoragePoolName(host, pInfo); params.put("zoneId", host.getDataCenterId()); params.put("clusterId", host.getClusterId()); @@ -909,7 +910,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); } - Map params = new HashMap(); + Map params = new HashMap<>(); params.put("zoneId", zone.getId()); params.put("clusterId", clusterId); params.put("podId", podId); @@ -1031,7 +1032,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } private Map extractApiParamAsMap(Map ds) { - Map details = new HashMap(); + Map details = new HashMap<>(); if (ds != null) { Collection detailsCollection = ds.values(); Iterator it = detailsCollection.iterator(); @@ -1442,13 +1443,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } for (Long hostId : hostIds) { try { - List answers = new ArrayList(); + List answers = new ArrayList<>(); Command[] cmdArray = cmds.toCommands(); for (Command cmd : cmdArray) { long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostId, cmd); answers.add(_agentMgr.send(targetHostId, cmd)); } - return new Pair(hostId, answers.toArray(new Answer[answers.size()])); + return new Pair<>(hostId, answers.toArray(new 
Answer[answers.size()])); } catch (AgentUnavailableException e) { logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); } catch (OperationTimedoutException e) { @@ -1463,7 +1464,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Command cmd) throws StorageUnavailableException { Commands cmds = new Commands(cmd); Pair result = sendToPool(pool, hostIdsToTryFirst, hostIdsToAvoid, cmds); - return new Pair(result.first(), result.second()[0]); + return new Pair<>(result.first(), result.second()[0]); } private void cleanupInactiveTemplates() { @@ -1785,7 +1786,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @DB List findAllVolumeIdInSnapshotTable(Long storeId) { String sql = "SELECT volume_id from snapshots, snapshot_store_ref WHERE snapshots.id = snapshot_store_ref.snapshot_id and store_id=? GROUP BY volume_id"; - List list = new ArrayList(); + List list = new ArrayList<>(); try { TransactionLegacy txn = TransactionLegacy.currentTxn(); ResultSet rs = null; @@ -1813,7 +1814,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C pstmt = txn.prepareAutoCloseStatement(sql); pstmt.setLong(1, volumeId); rs = pstmt.executeQuery(); - List list = new ArrayList(); + List list = new ArrayList<>(); while (rs.next()) { list.add(rs.getString(1)); } @@ -2062,7 +2063,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (!answer.getResult()) { - throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to ", pool.getUuid(), hostId, answer.getDetails())); + throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to %s", pool.getUuid(), hostId, answer.getDetails())); } assert (answer instanceof 
ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + @@ -2297,7 +2298,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Prepare for the syncvolumepath command DataTO volTO = volFactory.getVolume(volume.getId()).getTO(); DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType()); - Map details = new HashMap(); + Map details = new HashMap<>(); details.put(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString()); disk.setDetails(details); @@ -2416,7 +2417,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); } - List hosts = new ArrayList(); + List hosts = new ArrayList<>(); if (hostId != null) { hosts.add(hostId); } else { @@ -2571,11 +2572,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); - if (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool)) { - return true; - } - - return false; + return (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool)); } @Override @@ -2894,7 +2891,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (CollectionUtils.isEmpty(volumes)) { return false; } - List> answers = new ArrayList>(); + List> answers = new ArrayList<>(); for (Pair volumeDiskProfilePair : volumes) { Volume volume = volumeDiskProfilePair.first(); @@ -3232,7 +3229,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Check if it's the only/first store in the zone if (stores.size() == 0) { 
List hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId); - Set hypSet = new HashSet(hypervisorTypes); + Set hypSet = new HashSet<>(hypervisorTypes); TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); String filePath = null; @@ -3292,7 +3289,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws DiscoveryException, InvalidParameterValueException { // check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages List imgStores = _imageStoreDao.listImageStores(); - List nfsStores = new ArrayList(); + List nfsStores = new ArrayList<>(); if (imgStores != null && imgStores.size() > 0) { for (ImageStoreVO store : imgStores) { if (!store.getProviderName().equals(DataStoreProvider.NFS_IMAGE)) { @@ -3322,20 +3319,38 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return discoverImageStore(name, url, providerName, null, details); } + @Override + public ImageStore updateImageStore(UpdateImageStoreCmd cmd) { + return updateImageStoreStatus(cmd.getId(), cmd.getName(), cmd.getReadonly(), cmd.getCapacityBytes()); + } + @Override @ActionEvent(eventType = EventTypes.EVENT_UPDATE_IMAGE_STORE_ACCESS_STATE, eventDescription = "image store access updated") - public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + public ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes) { // Input validation ImageStoreVO imageStoreVO = _imageStoreDao.findById(id); if (imageStoreVO == null) { throw new IllegalArgumentException("Unable to find image store with ID: " + id); } - imageStoreVO.setReadonly(readonly); + if (com.cloud.utils.StringUtils.isNotBlank(name)) { + imageStoreVO.setName(name); + } + if 
(capacityBytes != null) { + imageStoreVO.setTotalSize(capacityBytes); + } + if (readonly != null) { + imageStoreVO.setReadonly(readonly); + } _imageStoreDao.update(id, imageStoreVO); return imageStoreVO; } + @Override + public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + return updateImageStoreStatus(id, null, readonly, null); + } + /** * @param poolId - Storage pool id for pool to update. * @param failOnChecks - If true, throw an error if pool type and state checks fail. @@ -3380,7 +3395,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } // find the host - List poolIds = new ArrayList(); + List poolIds = new ArrayList<>(); poolIds.add(pool.getId()); List hosts = _storagePoolHostDao.findHostsConnectedToPools(poolIds); if (hosts.size() > 0) { @@ -3417,7 +3432,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C VMTemplateZoneVO tmpltZone; List allTemplates = _vmTemplateDao.listAll(); - List dcIds = new ArrayList(); + List dcIds = new ArrayList<>(); if (zoneId != null) { dcIds.add(zoneId); } else { @@ -3534,7 +3549,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw ex; } - Map params = new HashMap(); + Map params = new HashMap<>(); params.put("zoneId", dcId); params.put("url", cmd.getUrl()); params.put("name", cmd.getUrl()); @@ -3622,8 +3637,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Cleanup expired volume URLs List volumesOnImageStoreList = _volumeStoreDao.listVolumeDownloadUrls(); - HashSet expiredVolumeIds = new HashSet(); - HashSet activeVolumeIds = new HashSet(); + HashSet expiredVolumeIds = new HashSet<>(); + HashSet activeVolumeIds = new HashSet<>(); for (VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList) { long volumeId = volumeOnImageStore.getVolumeId(); diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java 
b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 7dfc3737967..c950322452b 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -1986,7 +1986,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } private Volume changeDiskOfferingForVolumeInternal(VolumeVO volume, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean autoMigrateVolume, boolean shrinkOk) throws ResourceAllocationException { - DiskOfferingVO existingDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); + long existingDiskOfferingId = volume.getDiskOfferingId(); + DiskOfferingVO existingDiskOffering = _diskOfferingDao.findByIdIncludingRemoved(existingDiskOfferingId); DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newDiskOfferingId); Integer newHypervisorSnapshotReserve = null; @@ -1998,6 +1999,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic Long[] updateNewMinIops = {newMinIops}; Long[] updateNewMaxIops = {newMaxIops}; Integer[] updateNewHypervisorSnapshotReserve = {newHypervisorSnapshotReserve}; + volService.validateChangeDiskOfferingEncryptionType(existingDiskOfferingId, newDiskOfferingId); validateVolumeResizeWithNewDiskOfferingAndLoad(volume, existingDiskOffering, newDiskOffering, updateNewSize, updateNewMinIops, updateNewMaxIops, updateNewHypervisorSnapshotReserve); newSize = updateNewSize[0]; newMinIops = updateNewMinIops[0]; diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index ee73818638c..bdec6b95115 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2137,12 +2137,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new 
InvalidParameterValueException("Unable to Scale VM, since disk offering id associated with the old service offering is not same for new service offering"); } - DiskOfferingVO currentRootDiskOffering = _diskOfferingDao.findByIdIncludingRemoved(currentServiceOffering.getDiskOfferingId()); - DiskOfferingVO newRootDiskOffering = _diskOfferingDao.findById(newServiceOffering.getDiskOfferingId()); - - if (currentRootDiskOffering.getEncrypt() != newRootDiskOffering.getEncrypt()) { - throw new InvalidParameterValueException("Cannot change volume encryption type via service offering change"); - } + _volService.validateChangeDiskOfferingEncryptionType(currentServiceOffering.getDiskOfferingId(), newServiceOffering.getDiskOfferingId()); } private void changeDiskOfferingForRootVolume(Long vmId, DiskOfferingVO newDiskOffering, Map customParameters, Long zoneId) throws ResourceAllocationException { diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index 323a1ed9416..d117c460034 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -691,34 +691,6 @@ public class UserVmManagerImplTest { prepareAndRunResizeVolumeTest(2L, 10L, 20L, largerDisdkOffering, smallerDisdkOffering); } - @Test - public void validateDiskOfferingCheckForEncryption1Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, true); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, true); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - - @Test - public void validateDiskOfferingCheckForEncryption2Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, false); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, false); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, 
newOffering); - } - - @Test (expected = InvalidParameterValueException.class) - public void validateDiskOfferingCheckForEncryptionFail1Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, false); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, true); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - - @Test (expected = InvalidParameterValueException.class) - public void validateDiskOfferingCheckForEncryptionFail2Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, true); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, false); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - private void prepareAndRunResizeVolumeTest(Long expectedOfferingId, long expectedMinIops, long expectedMaxIops, DiskOfferingVO currentRootDiskOffering, DiskOfferingVO newRootDiskOffering) { long rootVolumeId = 1l; VolumeVO rootVolumeOfVm = Mockito.mock(VolumeVO.class); @@ -742,20 +714,6 @@ public class UserVmManagerImplTest { return newRootDiskOffering; } - private ServiceOfferingVO prepareOfferingsForEncryptionValidation(long diskOfferingId, boolean encryption) { - ServiceOfferingVO svcOffering = Mockito.mock(ServiceOfferingVO.class); - DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); - - Mockito.when(svcOffering.getDiskOfferingId()).thenReturn(diskOfferingId); - Mockito.when(diskOffering.getEncrypt()).thenReturn(encryption); - - // Be aware - Multiple calls with the same disk offering ID could conflict - Mockito.when(diskOfferingDao.findByIdIncludingRemoved(diskOfferingId)).thenReturn(diskOffering); - Mockito.when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOffering); - - return svcOffering; - } - @Test (expected = CloudRuntimeException.class) public void testUserDataDenyOverride() { Long userDataId = 1L; diff --git 
a/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java b/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java index 3949fa8e6ca..27aff3a06a2 100644 --- a/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java +++ b/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java @@ -204,6 +204,20 @@ public class MockVpcVirtualNetworkApplianceManager extends ManagerBase implement return false; } + @Override + public boolean stopKeepAlivedOnRouter(VirtualRouter router, + Network network) throws ConcurrentOperationException, ResourceUnavailableException { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean startKeepAlivedOnRouter(VirtualRouter router, + Network network) throws ConcurrentOperationException, ResourceUnavailableException { + // TODO Auto-generated method stub + return false; + } + /* (non-Javadoc) * @see com.cloud.network.router.VpcVirtualNetworkApplianceManager#destroyPrivateGateway(com.cloud.network.vpc.PrivateGateway, com.cloud.network.router.VirtualRouter) */ diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index e8158c71f85..e8f6e660451 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -529,8 +529,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar /** * Get the default network for the secondary storage VM, based on the zone it is in. 
Delegates to - * either {@link #getDefaultNetworkForZone(DataCenter)} or {@link #getDefaultNetworkForAdvancedSGZone(DataCenter)}, - * depending on the zone network type and whether or not security groups are enabled in the zone. + * either {@link #getDefaultNetworkForAdvancedZone(DataCenter)} or {@link #getDefaultNetworkForBasicZone(DataCenter)}, + * depending on the zone network type and whether security groups are enabled in the zone. * @param dc - The zone (DataCenter) of the secondary storage VM. * @return The default network for use with the secondary storage VM. */ diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java new file mode 100644 index 00000000000..a130f11a82a --- /dev/null +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java @@ -0,0 +1,67 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.storage.resource; + +import com.cloud.utils.script.Script; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +public class IpTablesHelper { + public static final Logger LOGGER = LogManager.getLogger(IpTablesHelper.class); + + public static final String OUTPUT_CHAIN = "OUTPUT"; + public static final String INPUT_CHAIN = "INPUT"; + public static final String INSERT = " -I "; + public static final String APPEND = " -A "; + + public static boolean needsAdding(String chain, String rule) { + Script command = new Script("/bin/bash", LOGGER); + command.add("-c"); + command.add("iptables -C " + chain + " " + rule); + + String commandOutput = command.execute(); + boolean needsAdding = (commandOutput != null && commandOutput.contains("iptables: Bad rule (does a matching rule exist in that chain?).")); + LOGGER.debug(String.format("Rule [%s], %s need adding to [%s] : %s", + rule, + needsAdding ? "does indeed" : "doesn't", + chain, + commandOutput + )); + return needsAdding; + } + + public static String addConditionally(String chain, boolean insert, String rule, String errMsg) { + LOGGER.info(String.format("Adding rule [%s] to [%s] if required.", rule, chain)); + if (needsAdding(chain, rule)) { + Script command = new Script("/bin/bash", LOGGER); + command.add("-c"); + command.add("iptables" + (insert ? 
INSERT : APPEND) + chain + " " + rule); + String result = command.execute(); + LOGGER.debug(String.format("Executed [%s] with result [%s]", command, result)); + if (result != null) { + LOGGER.warn(String.format("%s , err = %s", errMsg, result)); + return errMsg + result; + } + } else { + LOGGER.warn("Rule already defined in SVM: " + rule); + } + return null; + } +} diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 6f2d4d23cf1..26b9e3ea65e 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -247,11 +247,11 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S private String _storageNetmask; private String _storageGateway; private String _nfsVersion; - private final List nfsIps = new ArrayList(); + private final List nfsIps = new ArrayList<>(); protected String _parent = "/mnt/SecStorage"; final private String _tmpltpp = "template.properties"; protected String createTemplateFromSnapshotXenScript; - private HashMap uploadEntityStateMap = new HashMap(); + private HashMap uploadEntityStateMap = new HashMap<>(); private String _ssvmPSK = null; private long processTimeout; @@ -2330,15 +2330,14 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S if (!_inSystemVM) { return null; } - Script command = new Script("/bin/bash", logger); String intf = "eth1"; - command.add("-c"); - command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT"); + String rule = String.format("-o %s -d %s -p tcp -m state --state NEW -m tcp -j ACCEPT", intf, destCidr); + 
String errMsg = String.format("Error in allowing outgoing to %s", destCidr); - String result = command.execute(); + logger.info("Adding rule if required: {}", rule); + String result = IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN, true, rule, errMsg); if (result != null) { - logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result); - return "Error in allowing outgoing to " + destCidr + ", err=" + result; + return result; } addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr); @@ -2875,13 +2874,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S if (result != null) { logger.warn("Error in starting sshd service err=" + result); } - command = new Script("/bin/bash", logger); - command.add("-c"); - command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT"); - result = command.execute(); - if (result != null) { - logger.warn("Error in opening up ssh port err=" + result); - } + String rule = "-i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT"; + IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, "Error in opening up ssh port"); } private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) { diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java index fd5c9e43ac6..ad73a9bc708 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java @@ -48,6 +48,7 @@ import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import 
org.apache.cloudstack.storage.command.DownloadProgressCommand; import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType; +import org.apache.cloudstack.storage.resource.IpTablesHelper; import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; import org.apache.cloudstack.storage.resource.SecondaryStorageResource; import org.apache.cloudstack.utils.security.ChecksumValue; @@ -1226,17 +1227,14 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager } private void blockOutgoingOnPrivate() { - Script command = new Script("/bin/bash", logger); - String intf = "eth1"; - command.add("-c"); - command.add("iptables -A OUTPUT -o " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "80" + " -j REJECT;" + "iptables -A OUTPUT -o " + intf + - " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j REJECT;"); - - String result = command.execute(); - if (result != null) { - logger.warn("Error in blocking outgoing to port 80/443 err=" + result); - return; - } + IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN + , false + , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 80 -j REJECT;" + , "Error in blocking outgoing to port 80"); + IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN + , false + , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 443 -j REJECT;" + , "Error in blocking outgoing to port 443"); } @Override @@ -1262,17 +1260,19 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager if (result != null) { logger.warn("Error in stopping httpd service err=" + result); } - String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT); - String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF; - command = new Script("/bin/bash", logger); - command.add("-c"); - command.add("iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp 
--dport " + port + " -j ACCEPT;" + "iptables -I INPUT -i " + intf + - " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j ACCEPT;"); - - result = command.execute(); + result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN + , true + , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport " + TemplateConstants.DEFAULT_TMPLT_COPY_PORT + " -j ACCEPT" + , "Error in opening up apache2 port " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE); + if (result != null) { + return; + } + result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN + , true + , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT;" + , "Error in opening up apache2 port 443"); if (result != null) { - logger.warn("Error in opening up apache2 port err=" + result); return; } diff --git a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py index 635a9d69882..c71bcdac4f4 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py @@ -221,7 +221,7 @@ def save_iptables(command, iptables_file): def execute2(command, wait=True): """ Execute command """ - logging.info("Executing: %s" % command) + logging.info("Executing2: %s" % command) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) if wait: p.wait() diff --git a/systemvm/debian/opt/cloud/bin/manage_service.sh b/systemvm/debian/opt/cloud/bin/manage_service.sh new file mode 100755 index 00000000000..4d9f6621ed0 --- /dev/null +++ b/systemvm/debian/opt/cloud/bin/manage_service.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +systemctl $1 $2 diff --git a/test/integration/plugins/quota/test_quota_balance.py b/test/integration/plugins/quota/test_quota_balance.py new file mode 100644 index 00000000000..f5c1c75d7b2 --- /dev/null +++ b/test/integration/plugins/quota/test_quota_balance.py @@ -0,0 +1,191 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" +Test cases for validating the Quota balance of accounts +""" + +from marvin.cloudstackTestCase import * +from marvin.lib.utils import * +from marvin.lib.base import * +from marvin.lib.common import * +from nose.plugins.attrib import attr + + +class TestQuotaBalance(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestQuotaBalance, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.zone + + # Create Account + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup = [ + cls.account, + ] + + cls.services["account"] = cls.account.name + + if not is_config_suitable(apiclient=cls.apiclient, name='quota.enable.service', value='true'): + cls.debug("Quota service is not enabled, therefore the configuration `quota.enable.service` will be set to `true` and the management server will be restarted.") + Configurations.update(cls.apiclient, "quota.enable.service", "true") + cls.restartServer() + + return + + @classmethod + def restartServer(cls): + """Restart management server""" + + cls.debug("Restarting management server") + sshClient = SshClient( + cls.mgtSvrDetails["mgtSvrIp"], + 22, + cls.mgtSvrDetails["user"], + cls.mgtSvrDetails["passwd"] + ) + + command = "service cloudstack-management restart" + sshClient.execute(command) + + # Waits for management to come up in 5 mins, when it's up it will continue + timeout = time.time() + 300 + while time.time() < timeout: + if cls.isManagementUp() is True: + time.sleep(30) + return + time.sleep(5) + return cls.fail("Management server did not come up, failing") + + @classmethod + def isManagementUp(cls): + try: + 
cls.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd()) + return True + except Exception: + return False + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + self.tariffs = [] + return + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + self.delete_tariffs() + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def delete_tariffs(self): + for tariff in self.tariffs: + cmd = quotaTariffDelete.quotaTariffDeleteCmd() + cmd.id = tariff.uuid + self.apiclient.quotaTariffDelete(cmd) + + @attr(tags=["advanced", "smoke", "quota"], required_hardware="false") + def test_quota_balance(self): + """ + Test Quota balance + + Validate the following + 1. Add credits to an account + 2. Create Quota tariff for the usage type 21 (VM_DISK_IO_READ) + 3. Simulate quota usage by inserting a row in the `cloud_usage` table + 4. Update the balance of the account by calling the API quotaUpdate + 5. 
Verify the balance of the account according to the tariff created + """ + + # Create quota tariff for the usage type 21 (VM_DISK_IO_READ) + cmd = quotaTariffCreate.quotaTariffCreateCmd() + cmd.name = 'Tariff' + cmd.value = '10' + cmd.usagetype = '21' + self.tariffs.append(self.apiclient.quotaTariffCreate(cmd)) + + # Add credits to the account + cmd = quotaCredits.quotaCreditsCmd() + cmd.account = self.account.name + cmd.domainid = self.domain.id + cmd.value = 100 + self.apiclient.quotaCredits(cmd) + + # Fetch account ID from account_uuid + account_id_select = f"SELECT id FROM account WHERE uuid = '{self.account.id}';" + self.debug(account_id_select) + qresultset = self.dbclient.execute(account_id_select) + account_id = qresultset[0][0] + + # Fetch domain ID from domain_uuid + domain_id_select = f"SELECT id FROM `domain` d WHERE uuid = '{self.domain.id}';" + self.debug(domain_id_select) + qresultset = self.dbclient.execute(domain_id_select) + domain_id = qresultset[0][0] + + # Fetch zone ID from zone_uuid + zone_id_select = f"SELECT id from data_center dc where dc.uuid = '{self.zone.id}';" + self.debug(zone_id_select) + qresultset = self.dbclient.execute(zone_id_select) + zone_id = qresultset[0][0] + + start_date = datetime.datetime.now() + datetime.timedelta(seconds=1) + end_date = datetime.datetime.now() + datetime.timedelta(hours=1) + + # Manually insert a usage regarding the usage type 21 (VM_DISK_IO_READ) + sql_query = (f"INSERT INTO cloud_usage.cloud_usage (zone_id,account_id,domain_id,description,usage_display,usage_type,raw_usage,vm_instance_id,vm_name,offering_id,template_id," + f"usage_id,`type`,`size`,network_id,start_date,end_date,virtual_size,cpu_speed,cpu_cores,memory,quota_calculated,is_hidden,state)" + f" VALUES ('{zone_id}','{account_id}','{domain_id}','Test','1 Hrs',21,1,NULL,NULL,NULL,NULL,NULL,'VirtualMachine',NULL,NULL,'{start_date}','{end_date}',NULL,NULL,NULL,NULL,0,0,NULL);") + self.debug(sql_query) + self.dbclient.execute(sql_query) + + # 
Update quota to calculate the balance of the account + cmd = quotaUpdate.quotaUpdateCmd() + self.apiclient.quotaUpdate(cmd) + + # Retrieve the quota balance of the account + cmd = quotaBalance.quotaBalanceCmd() + cmd.domainid = self.account.domainid + cmd.account = self.account.name + response = self.apiclient.quotaBalance(cmd) + + self.debug(f"The quota balance for the account {self.account.name} is {response.balance}.") + self.assertEqual(response.balance.startquota, 90, f"The `startQuota` response field is supposed to be 90 but was {response.balance.startquota}.") + + return diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 26e8358779b..16f30574f04 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -1136,6 +1136,7 @@ "label.ipv6.subnets": "IPv6 Subnets", "label.ip.addresses": "IP Addresses", "label.iqn": "Target IQN", +"label.is.base64.encoded": "Base64 encoded", "label.is.in.progress": "is in progress", "label.is.shared": "Is shared", "label.is2faenabled": "Is 2FA enabled", diff --git a/ui/src/components/view/ResourceLimitTab.vue b/ui/src/components/view/ResourceLimitTab.vue index eba6b79cb7d..a54adf83eae 100644 --- a/ui/src/components/view/ResourceLimitTab.vue +++ b/ui/src/components/view/ResourceLimitTab.vue @@ -142,6 +142,7 @@ export default { subItem.key = subItem.tag ? (subItem.resourcetype + '-' + subItem.tag) : subItem.resourcetype form[subItem.key] = subItem.max || -1 }) + form[item.resourcetype] = item.max == null ? 
-1 : item.max }) this.form = form this.formRef.value.resetFields() diff --git a/ui/src/config/section/config.js b/ui/src/config/section/config.js index aa108b5b1fa..8f792e51ac9 100644 --- a/ui/src/config/section/config.js +++ b/ui/src/config/section/config.js @@ -101,7 +101,25 @@ export default { label: 'label.edit', dataView: true, popup: true, - args: ['description', 'clientid', 'redirecturi', 'secretkey', 'enabled'] + args: ['description', 'clientid', 'redirecturi', 'secretkey'] + }, + { + api: 'updateOauthProvider', + icon: 'play-circle-outlined', + label: 'label.enable.provider', + message: 'message.confirm.enable.provider', + dataView: true, + defaultArgs: { enabled: true }, + show: (record) => { return record.enabled === false } + }, + { + api: 'updateOauthProvider', + icon: 'pause-circle-outlined', + label: 'label.disable.provider', + message: 'message.confirm.disable.provider', + dataView: true, + defaultArgs: { enabled: false }, + show: (record) => { return record.enabled === true } }, { api: 'deleteOauthProvider', diff --git a/ui/src/config/section/infra/secondaryStorages.js b/ui/src/config/section/infra/secondaryStorages.js index 53fa546d934..3fc64c5c957 100644 --- a/ui/src/config/section/infra/secondaryStorages.js +++ b/ui/src/config/section/infra/secondaryStorages.js @@ -97,21 +97,10 @@ export default { }, { api: 'updateImageStore', - icon: 'stop-outlined', - label: 'label.action.image.store.read.only', - message: 'message.action.secondary.storage.read.only', + icon: 'edit-outlined', + label: 'label.edit', dataView: true, - defaultArgs: { readonly: true }, - show: (record) => { return record.readonly === false } - }, - { - api: 'updateImageStore', - icon: 'check-circle-outlined', - label: 'label.action.image.store.read.write', - message: 'message.action.secondary.storage.read.write', - dataView: true, - defaultArgs: { readonly: false }, - show: (record) => { return record.readonly === true } + args: ['name', 'readonly', 'capacitybytes'] }, { api: 
'deleteImageStore', diff --git a/ui/src/utils/plugins.js b/ui/src/utils/plugins.js index b30b70c1a2e..d93c8796659 100644 --- a/ui/src/utils/plugins.js +++ b/ui/src/utils/plugins.js @@ -492,6 +492,15 @@ export const fileSizeUtilPlugin = { } } +function isBase64 (str) { + try { + const decoded = new TextDecoder().decode(Uint8Array.from(atob(str), c => c.charCodeAt(0))) + return btoa(decoded) === str + } catch (err) { + return false + } +} + export const genericUtilPlugin = { install (app) { app.config.globalProperties.$isValidUuid = function (uuid) { @@ -500,8 +509,7 @@ export const genericUtilPlugin = { } app.config.globalProperties.$toBase64AndURIEncoded = function (text) { - const base64regex = /^([0-9a-zA-Z+/]{4})*(([0-9a-zA-Z+/]{2}==)|([0-9a-zA-Z+/]{3}=))?$/ - if (base64regex.test(text)) { + if (isBase64(text)) { return text } return encodeURIComponent(btoa(unescape(encodeURIComponent(text)))) diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index 3ffc83286d7..33e0b41af90 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -705,7 +705,6 @@ export default { }, getOkProps () { if (this.selectedRowKeys.length > 0 && this.currentAction?.groupAction) { - return { props: { type: 'default' } } } else { return { props: { type: 'primary' } } } diff --git a/ui/src/views/compute/RegisterUserData.vue b/ui/src/views/compute/RegisterUserData.vue index 990e59ff277..8e6311fdf9a 100644 --- a/ui/src/views/compute/RegisterUserData.vue +++ b/ui/src/views/compute/RegisterUserData.vue @@ -43,6 +43,9 @@ v-model:value="form.userdata" :placeholder="apiParams.userdata.description"/> + + +