From 296035d9a57caa62260a666da726ec45b6801c4b Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Wed, 2 Nov 2022 09:36:16 +0100 Subject: [PATCH 1/7] XenServer/XCP-ng: fix vm memory usage is always 99.9x% (#6852) According to https://docs.citrix.com/en-us/citrix-hypervisor/monitor-performance.html The metrics "memory_internal_free" is already in KiB, no need to convert. "Memory used as reported by the guest agent (KiB). Enabled by default" --- .../cloud/hypervisor/xenserver/resource/CitrixResourceBase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index 7462761cd07..f062bac1a59 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -3526,7 +3526,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S } else if (param.matches("vbd_.*_write")) { vmStatsAnswer.setDiskWriteKBs(vmStatsAnswer.getDiskWriteKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } else if (param.contains("memory_internal_free")) { - vmStatsAnswer.setIntFreeMemoryKBs(vmStatsAnswer.getIntFreeMemoryKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); + vmStatsAnswer.setIntFreeMemoryKBs(vmStatsAnswer.getIntFreeMemoryKBs() + getDataAverage(dataNode, col, numRows)); } else if (param.contains("memory_target")) { vmStatsAnswer.setTargetMemoryKBs(vmStatsAnswer.getTargetMemoryKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } else if (param.contains("memory")) { From fca5715db13bfb2ce713bf625c96bdb023f14c51 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar 
Date: Wed, 23 Nov 2022 19:03:52 +0530 Subject: [PATCH 2/7] ui: fix guest traffic vlan input (#6895) Fixes VLAN input while adding guest traffic type for the physical network in the zone wizard. When adding a zone physical network is not updated with the guest traffic vlan, resulting in network, vm deployments failures. Signed-off-by: Abhishek Kumar --- ui/src/views/infra/zone/AdvancedGuestTrafficForm.vue | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ui/src/views/infra/zone/AdvancedGuestTrafficForm.vue b/ui/src/views/infra/zone/AdvancedGuestTrafficForm.vue index 572be5448fd..515be333f98 100644 --- a/ui/src/views/infra/zone/AdvancedGuestTrafficForm.vue +++ b/ui/src/views/infra/zone/AdvancedGuestTrafficForm.vue @@ -103,7 +103,8 @@ export default { wrapperCol: { span: 12 } }, validStatus: '', - validMessage: '' + validMessage: '', + formModel: {} } }, watch: { From c8d27765d80186a8b51592b0c1030aab61d1909c Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 24 Nov 2022 15:08:20 +0530 Subject: [PATCH 3/7] orchestration: fix diskoffering for vr rootdisk (#6853) Fixes incorrect call of using service offering's ID while trying to retrieve linked disk offering. 
Signed-off-by: Abhishek Kumar Signed-off-by: Abhishek Kumar --- .../src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 64e4ad89154..db600d28dd4 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -524,7 +524,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering, final LinkedHashMap> networks, final DeploymentPlan plan, final HypervisorType hyperType) throws InsufficientCapacityException { - DiskOffering diskOffering = _diskOfferingDao.findById(serviceOffering.getId()); + DiskOffering diskOffering = _diskOfferingDao.findById(serviceOffering.getDiskOfferingId()); allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(diskOffering), new ArrayList<>(), networks, plan, hyperType, null, null); } From 6c436ec90e2b89d33ce9da6f988c44a9c8df6467 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 25 Nov 2022 14:19:16 +0530 Subject: [PATCH 4/7] server: fix domain shared public template check (#6916) Fixes #6885 Fixes the incorrect inverted check. 
Signed-off-by: Abhishek Kumar Signed-off-by: Abhishek Kumar --- server/src/main/java/com/cloud/acl/DomainChecker.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/com/cloud/acl/DomainChecker.java b/server/src/main/java/com/cloud/acl/DomainChecker.java index bf7bd52ece7..a8c9ab84f7e 100644 --- a/server/src/main/java/com/cloud/acl/DomainChecker.java +++ b/server/src/main/java/com/cloud/acl/DomainChecker.java @@ -120,7 +120,7 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { */ private void checkPublicTemplateAccess(VirtualMachineTemplate template, Account owner, Account caller){ - if (!QueryService.SharePublicTemplatesWithOtherDomains.valueIn(owner.getDomainId()) || + if (QueryService.SharePublicTemplatesWithOtherDomains.valueIn(owner.getDomainId()) || caller.getDomainId() == owner.getDomainId() || _domainDao.isChildDomain(owner.getDomainId(), caller.getDomainId())) { return; From dbc20320770efa5a62b67aacc226bb2ecb72105e Mon Sep 17 00:00:00 2001 From: Craig Squire Date: Wed, 30 Nov 2022 01:15:35 -0600 Subject: [PATCH 5/7] server: Check for null poolid (#6879) Extract retrieveDatastore method Add unit test for null poolId Fixes #6878 Co-authored-by: Craig Squire Co-authored-by: Stephan Krug --- .../com/cloud/tags/TaggedResourceManagerImpl.java | 11 ++++++++++- .../com/cloud/tags/TaggedResourceManagerImplTest.java | 7 +++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java index c855c9efa30..60ded224a21 100644 --- a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java @@ -318,8 +318,10 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso private void informStoragePoolForVmTags(long vmId, String key, String value) { List volumeVos = 
volumeDao.findByInstance(vmId); for (VolumeVO volume : volumeVos) { - DataStore dataStore = dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary); + Long poolId = volume.getPoolId(); + DataStore dataStore = retrieveDatastore(poolId); if (dataStore == null || !(dataStore.getDriver() instanceof PrimaryDataStoreDriver)) { + s_logger.info(String.format("No data store found for VM %d with pool ID %d.", vmId, poolId)); continue; } PrimaryDataStoreDriver dataStoreDriver = (PrimaryDataStoreDriver) dataStore.getDriver(); @@ -328,4 +330,11 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso } } } + + protected DataStore retrieveDatastore(Long poolId) { + if (poolId == null) { + return null; + } + return dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + } } diff --git a/server/src/test/java/com/cloud/tags/TaggedResourceManagerImplTest.java b/server/src/test/java/com/cloud/tags/TaggedResourceManagerImplTest.java index 6d5b314263c..d25f2feccb5 100644 --- a/server/src/test/java/com/cloud/tags/TaggedResourceManagerImplTest.java +++ b/server/src/test/java/com/cloud/tags/TaggedResourceManagerImplTest.java @@ -25,6 +25,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -123,4 +124,10 @@ public class TaggedResourceManagerImplTest extends TestCase { Mockito.doThrow(PermissionDeniedException.class).when(accountManager).checkAccess(caller, null, false, owner); taggedResourceManagerImplSpy.checkTagsDeletePermission(List.of(resourceTag1, resourceTag2), caller); } + + @Test + public void testRetrieveDataStoreNullPoolId() { + DataStore dataStore = taggedResourceManagerImplSpy.retrieveDatastore(null); + Assert.assertNull(dataStore); + } } From 47946db8883fde6257bfcde5a2988f1e2d8aa3f8 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 30 Nov 2022 12:58:48 
+0530 Subject: [PATCH 6/7] server: fix volume migration on user vm scale (#6704) Fixes #6701 When volume migration is initiated by system, account check is not needed. Introduces a new global setting - allow.diskoffering.change.during.scale.vm. This determines whether to allow or disallow disk offering change for root volume during scaling of a stopped or running VM. Signed-off-by: Abhishek Kumar Signed-off-by: Rohit Yadav Co-authored-by: Harikrishna Patnala Co-authored-by: Rohit Yadav Co-authored-by: Daniel Augusto Veronezi Salvador <38945620+GutoVeronezi@users.noreply.github.com> Co-authored-by: Abhishek Kumar --- .../com/cloud/server/ManagementService.java | 2 +- .../cloud/server/ManagementServerImpl.java | 21 +- .../cloud/storage/VolumeApiServiceImpl.java | 20 +- .../main/java/com/cloud/vm/UserVmManager.java | 3 + .../java/com/cloud/vm/UserVmManagerImpl.java | 15 +- .../vm/UnmanagedVMsManagerImpl.java | 2 +- test/integration/smoke/test_scale_vm.py | 521 ++++++++++++++++-- 7 files changed, 517 insertions(+), 67 deletions(-) diff --git a/api/src/main/java/com/cloud/server/ManagementService.java b/api/src/main/java/com/cloud/server/ManagementService.java index 5c385cc18d9..6aea206c796 100644 --- a/api/src/main/java/com/cloud/server/ManagementService.java +++ b/api/src/main/java/com/cloud/server/ManagementService.java @@ -405,7 +405,7 @@ public interface ManagementService { */ Pair, List> listStoragePoolsForMigrationOfVolume(Long volumeId); - Pair, List> listStoragePoolsForMigrationOfVolumeInternal(Long volumeId, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean keepSourceStoragePool, boolean bypassStorageTypeCheck); + Pair, List> listStoragePoolsForSystemMigrationOfVolume(Long volumeId, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean keepSourceStoragePool, boolean bypassStorageTypeCheck); String[] listEventTypes(); diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java 
b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 074fff86be5..540b2e39159 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -1526,7 +1526,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public Pair, List> listStoragePoolsForMigrationOfVolume(final Long volumeId) { - Pair, List> allPoolsAndSuitablePoolsPair = listStoragePoolsForMigrationOfVolumeInternal(volumeId, null, null, null, null, false, true); + Pair, List> allPoolsAndSuitablePoolsPair = listStoragePoolsForMigrationOfVolumeInternal(volumeId, null, null, null, null, false, true, false); List allPools = allPoolsAndSuitablePoolsPair.first(); List suitablePools = allPoolsAndSuitablePoolsPair.second(); List avoidPools = new ArrayList<>(); @@ -1542,13 +1542,20 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return new Pair, List>(allPools, suitablePools); } - public Pair, List> listStoragePoolsForMigrationOfVolumeInternal(final Long volumeId, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean keepSourceStoragePool, boolean bypassStorageTypeCheck) { - final Account caller = getCaller(); - if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the volume"); + @Override + public Pair, List> listStoragePoolsForSystemMigrationOfVolume(final Long volumeId, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean keepSourceStoragePool, boolean bypassStorageTypeCheck) { + return listStoragePoolsForMigrationOfVolumeInternal(volumeId, newDiskOfferingId, newSize, newMinIops, newMaxIops, keepSourceStoragePool, bypassStorageTypeCheck, true); + } + + public Pair, List> listStoragePoolsForMigrationOfVolumeInternal(final Long volumeId, Long newDiskOfferingId, Long newSize, 
Long newMinIops, Long newMaxIops, boolean keepSourceStoragePool, boolean bypassStorageTypeCheck, boolean bypassAccountCheck) { + if (!bypassAccountCheck) { + final Account caller = getCaller(); + if (!_accountMgr.isRootAdmin(caller.getId())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Caller is not a root admin, permission denied to migrate the volume"); + } + throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume"); } - throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume"); } final VolumeVO volume = _volumeDao.findById(volumeId); diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index e6726f6977b..bbebc7a2f3b 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -96,6 +96,7 @@ import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToSt import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; @@ -1767,14 +1768,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return volume; } - if (currentSize != newSize || newMaxIops != volume.getMaxIops() || newMinIops != volume.getMinIops()) { + if (currentSize != newSize || !compareEqualsIncludingNullOrZero(newMaxIops, volume.getMaxIops()) || !compareEqualsIncludingNullOrZero(newMinIops, volume.getMinIops())) { volumeResizeRequired = true; validateVolumeReadyStateAndHypervisorChecks(volume, currentSize, newSize); } StoragePoolVO existingStoragePool = 
_storagePoolDao.findById(volume.getPoolId()); - Pair, List> poolsPair = managementService.listStoragePoolsForMigrationOfVolumeInternal(volume.getId(), newDiskOffering.getId(), newSize, newMinIops, newMaxIops, true, false); + Pair, List> poolsPair = managementService.listStoragePoolsForSystemMigrationOfVolume(volume.getId(), newDiskOffering.getId(), newSize, newMinIops, newMaxIops, true, false); List suitableStoragePools = poolsPair.second(); if (!suitableStoragePools.stream().anyMatch(p -> (p.getId() == existingStoragePool.getId()))) { @@ -1823,6 +1824,21 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return volume; } + /** + * This method is to compare long values, in miniops and maxiops a or b can be null or 0. + * Use this method to treat 0 and null as same + * + * @param a + * @param b + * @return true if a and b are equal excluding 0 and null values. + */ + private boolean compareEqualsIncludingNullOrZero(Long a, Long b) { + a = ObjectUtils.defaultIfNull(a, 0L); + b = ObjectUtils.defaultIfNull(b, 0L); + + return a.equals(b); + } + /** * Returns true if the new disk offering is the same than current offering, and the respective Service offering is a custom (constraint or unconstraint) offering. 
*/ diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index cde2d04970d..4449dc54860 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -47,9 +47,12 @@ import com.cloud.utils.Pair; */ public interface UserVmManager extends UserVmService { String EnableDynamicallyScaleVmCK = "enable.dynamic.scale.vm"; + String AllowDiskOfferingChangeDuringScaleVmCK = "allow.diskoffering.change.during.scale.vm"; String AllowUserExpungeRecoverVmCK ="allow.user.expunge.recover.vm"; ConfigKey EnableDynamicallyScaleVm = new ConfigKey("Advanced", Boolean.class, EnableDynamicallyScaleVmCK, "false", "Enables/Disables dynamically scaling a vm", true, ConfigKey.Scope.Zone); + ConfigKey AllowDiskOfferingChangeDuringScaleVm = new ConfigKey("Advanced", Boolean.class, AllowDiskOfferingChangeDuringScaleVmCK, "false", + "Determines whether to allow or disallow disk offering change for root volume during scaling of a stopped or running vm", true, ConfigKey.Scope.Zone); ConfigKey AllowUserExpungeRecoverVm = new ConfigKey("Advanced", Boolean.class, AllowUserExpungeRecoverVmCK, "false", "Determines whether users can expunge or recover their vm", true, ConfigKey.Scope.Account); ConfigKey DisplayVMOVFProperties = new ConfigKey("Advanced", Boolean.class, "vm.display.ovf.properties", "false", diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 748ee4c6e74..3e17cb75809 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -1241,7 +1241,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // resize and migrate the root volume if required DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newServiceOffering.getDiskOfferingId()); - 
changeDiskOfferingForRootVolume(vmId, newDiskOffering, customParameters); + changeDiskOfferingForRootVolume(vmId, newDiskOffering, customParameters, vmInstance.getDataCenterId()); _itMgr.upgradeVmDb(vmId, newServiceOffering, currentServiceOffering); @@ -2000,7 +2000,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // #3 resize or migrate the root volume if required DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newServiceOffering.getDiskOfferingId()); - changeDiskOfferingForRootVolume(vmId, newDiskOffering, customParameters); + changeDiskOfferingForRootVolume(vmId, newDiskOffering, customParameters, vmInstance.getDataCenterId()); // #4 scale the vm now vmInstance = _vmInstanceDao.findById(vmId); @@ -2036,7 +2036,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - private void changeDiskOfferingForRootVolume(Long vmId, DiskOfferingVO newDiskOffering, Map customParameters) throws ResourceAllocationException { + private void changeDiskOfferingForRootVolume(Long vmId, DiskOfferingVO newDiskOffering, Map customParameters, Long zoneId) throws ResourceAllocationException { + + if (!AllowDiskOfferingChangeDuringScaleVm.valueIn(zoneId)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("Changing the disk offering of the root volume during the compute offering change operation is disabled. 
Please check the setting [%s].", AllowDiskOfferingChangeDuringScaleVm.key())); + } + return; + } List vols = _volsDao.findReadyAndAllocatedRootVolumesByInstance(vmId); @@ -7791,7 +7798,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {EnableDynamicallyScaleVm, AllowUserExpungeRecoverVm, VmIpFetchWaitInterval, VmIpFetchTrialMax, + return new ConfigKey[] {EnableDynamicallyScaleVm, AllowDiskOfferingChangeDuringScaleVm, AllowUserExpungeRecoverVm, VmIpFetchWaitInterval, VmIpFetchTrialMax, VmIpFetchThreadPoolMax, VmIpFetchTaskWorkers, AllowDeployVmIfGivenHostFails, EnableAdditionalVmConfig, DisplayVMOVFProperties, KvmAdditionalConfigAllowList, XenServerAdditionalConfigAllowList, VmwareAdditionalConfigAllowList}; } diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 0a4599e1560..38f57d1f0bb 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -794,7 +794,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { continue; } LOGGER.debug(String.format("Volume %s needs to be migrated", volumeVO.getUuid())); - Pair, List> poolsPair = managementService.listStoragePoolsForMigrationOfVolumeInternal(profile.getVolumeId(), null, null, null, null, false, true); + Pair, List> poolsPair = managementService.listStoragePoolsForSystemMigrationOfVolume(profile.getVolumeId(), null, null, null, null, false, true); if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) { cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume ID: %s migration as no suitable pool(s) found", userVm.getInstanceName(), 
volumeVO.getUuid())); diff --git a/test/integration/smoke/test_scale_vm.py b/test/integration/smoke/test_scale_vm.py index c9b1bf837ba..7f8b65b8465 100644 --- a/test/integration/smoke/test_scale_vm.py +++ b/test/integration/smoke/test_scale_vm.py @@ -25,10 +25,13 @@ from marvin.lib.base import (Account, Host, VirtualMachine, ServiceOffering, + DiskOffering, Template, - Configurations) + Configurations, + Volume) from marvin.lib.common import (get_zone, get_template, + get_test_template, get_domain) from nose.plugins.attrib import attr from marvin.sshClient import SshClient @@ -54,7 +57,7 @@ class TestScaleVm(cloudstackTestCase): return # Get Zone, Domain and templates - domain = get_domain(cls.apiclient) + cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype @@ -73,14 +76,19 @@ class TestScaleVm(cloudstackTestCase): isdynamicallyscalable='true' ) else: - cls.template = Template.register( - cls.apiclient, - cls.services["CentOS7template"], - zoneid=cls.zone.id - ) - cls._cleanup.append(cls.template) - cls.template.download(cls.apiclient) - time.sleep(60) + cls.template = get_test_template( + cls.apiclient, + cls.zone.id, + cls.hypervisor + ) + if cls.template == FAILED: + assert False, "get_test_template() failed to return template\ + with hypervisor %s" % cls.hypervisor + cls.template = Template.update( + cls.template, + cls.apiclient, + isdynamicallyscalable='true' + ) # Set Zones and disk offerings cls.services["small"]["zoneid"] = cls.zone.id @@ -90,7 +98,7 @@ class TestScaleVm(cloudstackTestCase): cls.account = Account.create( cls.apiclient, cls.services["account"], - domainid=domain.id + domainid=cls.domain.id ) cls._cleanup.append(cls.account) @@ -124,37 +132,6 @@ class TestScaleVm(cloudstackTestCase): dynamicscalingenabled=False ) - # create a virtual machine - cls.virtual_machine = VirtualMachine.create( - cls.apiclient, - cls.services["small"], - 
accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.small_offering.id, - mode=cls.services["mode"] - ) - - # create a virtual machine which cannot be dynamically scalable - cls.virtual_machine_with_service_offering_dynamic_scaling_disabled = VirtualMachine.create( - cls.apiclient, - cls.services["small"], - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.small_offering_dynamic_scaling_disabled.id, - mode=cls.services["mode"] - ) - - # create a virtual machine which cannot be dynamically scalable - cls.virtual_machine_not_dynamically_scalable = VirtualMachine.create( - cls.apiclient, - cls.services["small"], - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.small_offering.id, - mode=cls.services["mode"], - dynamicscalingenabled=False - ) - cls._cleanup = [ cls.small_offering, cls.big_offering, @@ -170,6 +147,11 @@ class TestScaleVm(cloudstackTestCase): name="enable.dynamic.scale.vm", value="false" ) + Configurations.update( + cls.apiclient, + name="allow.diskOffering.change.during.scale.vm", + value="false" + ) super(TestScaleVm,cls).tearDownClass() return @@ -177,6 +159,7 @@ class TestScaleVm(cloudstackTestCase): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] + self.services["disk_offering"]["disksize"] = 2 if self.unsupportedHypervisor: self.skipTest("Skipping test because unsupported hypervisor\ @@ -199,6 +182,9 @@ class TestScaleVm(cloudstackTestCase): return ssh_client + def is_host_xcpng8(self, hostname): + return type(hostname) == list and len(hostname) > 0 and 'XCP-ng 8' in hostname[0] + @attr(hypervisor="xenserver") @attr(tags=["advanced", "basic"], required_hardware="false") def test_01_scale_vm(self): @@ -216,6 +202,17 @@ class TestScaleVm(cloudstackTestCase): # scaling is not # guaranteed until tools are installed, vm rebooted + # create a virtual machine + self.virtual_machine = 
VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id, + mode=self.services["mode"] + ) + self.cleanup.append(self.virtual_machine) + # If hypervisor is Vmware, then check if # the vmware tools are installed and the process is running # Vmware tools are necessary for scale VM operation @@ -227,6 +224,7 @@ class TestScaleVm(cloudstackTestCase): if not "running" in result: self.skipTest("Skipping scale VM operation because\ VMware tools are not installed on the VM") + res = None if self.hypervisor.lower() != 'simulator': hostid = self.virtual_machine.hostid host = Host.list( @@ -251,14 +249,27 @@ class TestScaleVm(cloudstackTestCase): self.apiclient, isdynamicallyscalable='true') + if self.is_host_xcpng8(res): + self.debug("Only scaling for CPU for XCP-ng 8") + offering_data = self.services["service_offerings"]["big"] + offering_data["cpunumber"] = 2 + offering_data["memory"] = self.virtual_machine.memory + self.bigger_offering = ServiceOffering.create( + self.apiclient, + offering_data + ) + self.cleanup.append(self.bigger_offering) + else: + self.bigger_offering = self.big_offering + self.debug("Scaling VM-ID: %s to service offering: %s and state %s" % ( self.virtual_machine.id, - self.big_offering.id, + self.bigger_offering.id, self.virtual_machine.state )) cmd = scaleVirtualMachine.scaleVirtualMachineCmd() - cmd.serviceofferingid = self.big_offering.id + cmd.serviceofferingid = self.bigger_offering.id cmd.id = self.virtual_machine.id try: @@ -297,11 +308,11 @@ class TestScaleVm(cloudstackTestCase): offering %s and the response says %s" % (self.virtual_machine.id, self.virtual_machine.serviceofferingid, - self.big_offering.id, + self.bigger_offering.id, vm_response.serviceofferingid)) self.assertEqual( vm_response.serviceofferingid, - self.big_offering.id, + self.bigger_offering.id, "Check service offering of the VM" ) @@ -313,7 +324,7 @@ class 
TestScaleVm(cloudstackTestCase): return @attr(tags=["advanced", "basic"], required_hardware="false") - def test_02_scale_vm(self): + def test_02_scale_vm_negative_offering_disable_scaling(self): """Test scale virtual machine which is created from a service offering for which dynamicscalingenabled is false. Scaling operation should fail. """ @@ -325,6 +336,17 @@ class TestScaleVm(cloudstackTestCase): # scaling is not # guaranteed until tools are installed, vm rebooted + # create a virtual machine which cannot be dynamically scalable + self.virtual_machine_with_service_offering_dynamic_scaling_disabled = VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering_dynamic_scaling_disabled.id, + mode=self.services["mode"] + ) + self.cleanup.append(self.virtual_machine_with_service_offering_dynamic_scaling_disabled) + # If hypervisor is Vmware, then check if # the vmware tools are installed and the process is running # Vmware tools are necessary for scale VM operation @@ -368,7 +390,7 @@ class TestScaleVm(cloudstackTestCase): self.debug("Scaling VM-ID: %s to service offering: %s for which dynamic scaling is disabled and VM state %s" % ( self.virtual_machine_with_service_offering_dynamic_scaling_disabled.id, self.big_offering_dynamic_scaling_disabled.id, - self.virtual_machine.state + self.virtual_machine_with_service_offering_dynamic_scaling_disabled.state )) cmd = scaleVirtualMachine.scaleVirtualMachineCmd() @@ -388,7 +410,7 @@ class TestScaleVm(cloudstackTestCase): self.debug("Scaling VM-ID: %s to service offering: %s for which dynamic scaling is enabled and VM state %s" % ( self.virtual_machine_with_service_offering_dynamic_scaling_disabled.id, self.big_offering.id, - self.virtual_machine.state + self.virtual_machine_with_service_offering_dynamic_scaling_disabled.state )) cmd = scaleVirtualMachine.scaleVirtualMachineCmd() @@ -408,7 +430,7 @@ class 
TestScaleVm(cloudstackTestCase): return @attr(tags=["advanced", "basic"], required_hardware="false") - def test_03_scale_vm(self): + def test_03_scale_vm_negative_vm_disable_scaling(self): """Test scale virtual machine which is not dynamically scalable to a service offering. Scaling operation should fail. """ # Validate the following @@ -422,6 +444,18 @@ class TestScaleVm(cloudstackTestCase): # scaling is not # guaranteed until tools are installed, vm rebooted + # create a virtual machine which cannot be dynamically scalable + self.virtual_machine_not_dynamically_scalable = VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id, + mode=self.services["mode"], + dynamicscalingenabled=False + ) + self.cleanup.append(self.virtual_machine_not_dynamically_scalable) + # If hypervisor is Vmware, then check if # the vmware tools are installed and the process is running # Vmware tools are necessary for scale VM operation @@ -463,7 +497,7 @@ class TestScaleVm(cloudstackTestCase): self.debug("Scaling VM-ID: %s to service offering: %s for which dynamic scaling is enabled and VM state %s" % ( self.virtual_machine_not_dynamically_scalable.id, self.big_offering.id, - self.virtual_machine.state + self.virtual_machine_not_dynamically_scalable.state )) cmd = scaleVirtualMachine.scaleVirtualMachineCmd() @@ -480,4 +514,387 @@ class TestScaleVm(cloudstackTestCase): else: self.fail("Expected an exception to be thrown, failing") - return \ No newline at end of file + return + + @attr(hypervisor="xenserver") + @attr(tags=["advanced", "basic"], required_hardware="false") + def test_04_scale_vm_with_user_account(self): + """Test scale virtual machine under useraccount + """ + # Validate the following + # Create a user Account and create a VM in it. 
+ # Scale up the vm and see if it scales to the new svc offering + + # Create an user account + self.userAccount = Account.create( + self.apiclient, + self.services["account"], + admin=False, + domainid=self.domain.id + ) + + self.cleanup.append(self.userAccount) + # Create user api client of the user account + self.userapiclient = self.testClient.getUserApiClient( + UserName=self.userAccount.name, + DomainName=self.userAccount.domain + ) + + # create a virtual machine + self.virtual_machine_in_user_account = VirtualMachine.create( + self.userapiclient, + self.services["small"], + accountid=self.userAccount.name, + domainid=self.userAccount.domainid, + serviceofferingid=self.small_offering.id, + mode=self.services["mode"] + ) + + if self.hypervisor.lower() == "vmware": + sshClient = self.virtual_machine_in_user_account.get_ssh_client() + result = str( + sshClient.execute("service vmware-tools status")).lower() + self.debug("and result is: %s" % result) + if not "running" in result: + self.skipTest("Skipping scale VM operation because\ + VMware tools are not installed on the VM") + res = None + if self.hypervisor.lower() != 'simulator': + list_vm_response = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine_in_user_account.id + )[0] + hostid = list_vm_response.hostid + host = Host.list( + self.apiclient, + zoneid=self.zone.id, + hostid=hostid, + type='Routing' + )[0] + + try: + username = self.hostConfig["username"] + password = self.hostConfig["password"] + ssh_client = self.get_ssh_client(host.ipaddress, username, password) + res = ssh_client.execute("hostnamectl | grep 'Operating System' | cut -d':' -f2") + except Exception as e: + pass + + if 'XenServer' in res[0]: + self.skipTest("Skipping test for XenServer as it's License does not allow scaling") + + self.virtual_machine_in_user_account.update( + self.userapiclient, + isdynamicallyscalable='true') + + if self.is_host_xcpng8(res): + self.debug("Only scaling for CPU for XCP-ng 8") + offering_data 
= self.services["service_offerings"]["big"] + offering_data["cpunumber"] = 2 + offering_data["memory"] = self.virtual_machine_in_user_account.memory + self.bigger_offering = ServiceOffering.create( + self.apiclient, + offering_data + ) + self.cleanup.append(self.bigger_offering) + else: + self.bigger_offering = self.big_offering + + self.debug("Scaling VM-ID: %s to service offering: %s and state %s" % ( + self.virtual_machine_in_user_account.id, + self.bigger_offering.id, + self.virtual_machine_in_user_account.state + )) + + cmd = scaleVirtualMachine.scaleVirtualMachineCmd() + cmd.serviceofferingid = self.bigger_offering.id + cmd.id = self.virtual_machine_in_user_account.id + + try: + self.userapiclient.scaleVirtualMachine(cmd) + except Exception as e: + if "LicenceRestriction" in str(e): + self.skipTest("Your XenServer License does not allow scaling") + else: + self.fail("Scaling failed with the following exception: " + str(e)) + + list_vm_response = VirtualMachine.list( + self.userapiclient, + id=self.virtual_machine_in_user_account.id + ) + + vm_response = list_vm_response[0] + + self.debug( + "Scaling VM-ID: %s from service offering: %s to new service\ + offering %s and the response says %s" % + (self.virtual_machine_in_user_account.id, + self.virtual_machine_in_user_account.serviceofferingid, + self.bigger_offering.id, + vm_response.serviceofferingid)) + self.assertEqual( + vm_response.serviceofferingid, + self.bigger_offering.id, + "Check service offering of the VM" + ) + + self.assertEqual( + vm_response.state, + 'Running', + "Check the state of VM" + ) + return + + @attr(hypervisor="xenserver") + @attr(tags=["advanced", "basic"], required_hardware="false") + def test_05_scale_vm_dont_allow_disk_offering_change(self): + """Test scale virtual machine with disk offering changes + """ + # Validate the following two cases + + # 1 + # create serviceoffering1 with diskoffering1 + # create serviceoffering2 with diskoffering2 + # create a VM with serviceoffering1 + 
# Scale up the vm to serviceoffering2 + # Check disk offering of root volume to be diskoffering1 since setting allow.diskOffering.change.during.scale.vm is false + + # 2 + # create serviceoffering3 with diskoffering3 + # update setting allow.diskOffering.change.during.scale.vm to true + # scale up the VM to serviceoffering3 + # Check disk offering of root volume to be diskoffering3 since setting allow.diskOffering.change.during.scale.vm is true + + self.disk_offering1 = DiskOffering.create( + self.apiclient, + self.services["disk_offering"], + ) + self._cleanup.append(self.disk_offering1) + offering_data = { + 'displaytext': 'ServiceOffering1WithDiskOffering1', + 'cpuspeed': 500, + 'cpunumber': 1, + 'name': 'ServiceOffering1WithDiskOffering1', + 'memory': 512, + 'diskofferingid': self.disk_offering1.id + } + + self.ServiceOffering1WithDiskOffering1 = ServiceOffering.create( + self.apiclient, + offering_data, + ) + self._cleanup.append(self.ServiceOffering1WithDiskOffering1) + + # create a virtual machine + self.virtual_machine_test = VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.ServiceOffering1WithDiskOffering1.id, + mode=self.services["mode"] + ) + + if self.hypervisor.lower() == "vmware": + sshClient = self.virtual_machine_test.get_ssh_client() + result = str( + sshClient.execute("service vmware-tools status")).lower() + self.debug("and result is: %s" % result) + if not "running" in result: + self.skipTest("Skipping scale VM operation because\ + VMware tools are not installed on the VM") + res = None + if self.hypervisor.lower() != 'simulator': + hostid = self.virtual_machine_test.hostid + host = Host.list( + self.apiclient, + zoneid=self.zone.id, + hostid=hostid, + type='Routing' + )[0] + + try: + username = self.hostConfig["username"] + password = self.hostConfig["password"] + ssh_client = self.get_ssh_client(host.ipaddress, username, password) + res 
= ssh_client.execute("hostnamectl | grep 'Operating System' | cut -d':' -f2") + except Exception as e: + pass + + if 'XenServer' in res[0]: + self.skipTest("Skipping test for XenServer as it's License does not allow scaling") + + self.virtual_machine_test.update( + self.apiclient, + isdynamicallyscalable='true') + + self.disk_offering2 = DiskOffering.create( + self.apiclient, + self.services["disk_offering"], + ) + self._cleanup.append(self.disk_offering2) + offering_data = { + 'displaytext': 'ServiceOffering2WithDiskOffering2', + 'cpuspeed': 1000, + 'cpunumber': 2, + 'name': 'ServiceOffering2WithDiskOffering2', + 'memory': 1024, + 'diskofferingid': self.disk_offering2.id + } + if self.is_host_xcpng8(res): + self.debug("Only scaling for CPU for XCP-ng 8") + offering_data["memory"] = self.virtual_machine_test.memory + + self.ServiceOffering2WithDiskOffering2 = ServiceOffering.create( + self.apiclient, + offering_data, + ) + self._cleanup.append(self.ServiceOffering2WithDiskOffering2) + + self.debug("Scaling VM-ID: %s to service offering: %s and state %s" % ( + self.virtual_machine_test.id, + self.ServiceOffering2WithDiskOffering2.id, + self.virtual_machine_test.state + )) + + cmd = scaleVirtualMachine.scaleVirtualMachineCmd() + cmd.serviceofferingid = self.ServiceOffering2WithDiskOffering2.id + cmd.id = self.virtual_machine_test.id + + try: + self.apiclient.scaleVirtualMachine(cmd) + except Exception as e: + if "LicenceRestriction" in str(e): + self.skipTest("Your XenServer License does not allow scaling") + else: + self.fail("Scaling failed with the following exception: " + str(e)) + + list_vm_response = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine_test.id + ) + + vm_response = list_vm_response[0] + + self.debug( + "Scaling VM-ID: %s from service offering: %s to new service\ + offering %s and the response says %s" % + (self.virtual_machine_test.id, + self.virtual_machine_test.serviceofferingid, + self.ServiceOffering2WithDiskOffering2.id, + 
vm_response.serviceofferingid)) + + vm_response = list_vm_response[0] + + self.assertEqual( + vm_response.serviceofferingid, + self.ServiceOffering2WithDiskOffering2.id, + "Check service offering of the VM" + ) + + volume_response = Volume.list( + self.apiclient, + virtualmachineid=self.virtual_machine_test.id, + listall=True + )[0] + + self.assertEqual( + volume_response.diskofferingid, + self.disk_offering1.id, + "Check disk offering of the VM, this should not change since allow.diskOffering.change.during.scale.vm is false" + ) + + # Do same scale vm operation with allow.diskOffering.change.during.scale.vm value to true + if self.hypervisor.lower() in ['simulator']: + self.debug("Simulator doesn't support changing disk offering, volume resize") + return + disk_offering_data = self.services["disk_offering"] + if self.hypervisor.lower() in ['xenserver']: + self.debug("For hypervisor %s, do not resize volume and just change try to change the disk offering") + volume_response = Volume.list( + self.apiclient, + virtualmachineid=self.virtual_machine_test.id, + listall=True + )[0] + disk_offering_data["disksize"] = int(volume_response.size / (1024 ** 3)) + self.disk_offering3 = DiskOffering.create( + self.apiclient, + disk_offering_data, + ) + self._cleanup.append(self.disk_offering3) + offering_data = { + 'displaytext': 'ServiceOffering3WithDiskOffering3', + 'cpuspeed': 1500, + 'cpunumber': 2, + 'name': 'ServiceOffering3WithDiskOffering3', + 'memory': 1024, + 'diskofferingid': self.disk_offering3.id + } + if self.is_host_xcpng8(res): + self.debug("Only scaling for CPU for XCP-ng 8") + offering_data["memory"] = vm_response.memory + + self.ServiceOffering3WithDiskOffering3 = ServiceOffering.create( + self.apiclient, + offering_data, + ) + self._cleanup.append(self.ServiceOffering3WithDiskOffering3) + + Configurations.update( + self.apiclient, + name="allow.diskOffering.change.during.scale.vm", + value="true" + ) + + self.debug("Scaling VM-ID: %s to service offering: %s 
and state %s" % ( + self.virtual_machine_test.id, + self.ServiceOffering3WithDiskOffering3.id, + self.virtual_machine_test.state + )) + + cmd = scaleVirtualMachine.scaleVirtualMachineCmd() + cmd.serviceofferingid = self.ServiceOffering3WithDiskOffering3.id + cmd.id = self.virtual_machine_test.id + + try: + self.apiclient.scaleVirtualMachine(cmd) + except Exception as e: + if "LicenceRestriction" in str(e): + self.skipTest("Your XenServer License does not allow scaling") + else: + self.fail("Scaling failed with the following exception: " + str(e)) + + list_vm_response = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine_test.id + ) + + vm_response = list_vm_response[0] + + self.debug( + "Scaling VM-ID: %s from service offering: %s to new service\ + offering %s and the response says %s" % + (self.virtual_machine_test.id, + self.virtual_machine_test.serviceofferingid, + self.ServiceOffering3WithDiskOffering3.id, + vm_response.serviceofferingid)) + + self.assertEqual( + vm_response.serviceofferingid, + self.ServiceOffering3WithDiskOffering3.id, + "Check service offering of the VM" + ) + + volume_response = Volume.list( + self.apiclient, + virtualmachineid=self.virtual_machine_test.id, + listall=True + )[0] + + self.assertEqual( + volume_response.diskofferingid, + self.disk_offering3.id, + "Check disk offering of the VM, this should change since allow.diskOffering.change.during.scale.vm is true" + ) + + return From cf32f77e3dedfc75ea89278aacaaef89537f5d8e Mon Sep 17 00:00:00 2001 From: Daniel Augusto Veronezi Salvador <38945620+GutoVeronezi@users.noreply.github.com> Date: Wed, 30 Nov 2022 09:59:05 -0300 Subject: [PATCH 7/7] systemvm: Fix C2S VPN in parallel to S2S VPN (#6907) PR #5375, introduced in version 4.15.2.0, removed parameter %any of VPNs client-to-site (C2S) IPSec secrets: structure before PR vr: ipsec/l2tp vpn secret with no ID selectors #5375: %any : PSK "" structure after PR vr: ipsec/l2tp vpn secret with no ID selectors #5375: : PSK "" 
Because of that, when a VPN site-to-site (S2S) is created in parallel to a VPN C2S in the same network, the C2S will not handle any IP (%any) anymore and, as the network is being tunneled to the other VPN, the connection will be handled by the final peer. This way, when a VPN S2S is created in parallel to a VPN C2S in the same network, it is only possible to connect to the C2S with the S2S PSK. As ACS is only able to implement a single C2S per network (ACS allows setting more than one IP of the network as VPN, however, only the first will be implemented) and every S2S has its own secret file, the secrets structure of C2S was changed to contain only the PSK: : PSK "" By doing that, StrongSwan will correctly handle C2S connections from any IP and will still use the correct PSK for S2S. Co-authored-by: GutoVeronezi --- systemvm/debian/opt/cloud/bin/configure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/systemvm/debian/opt/cloud/bin/configure.py b/systemvm/debian/opt/cloud/bin/configure.py index 67e575bfb7a..ce5a4b20fb5 100755 --- a/systemvm/debian/opt/cloud/bin/configure.py +++ b/systemvm/debian/opt/cloud/bin/configure.py @@ -999,7 +999,7 @@ class CsRemoteAccessVpn(CsDataBag): secret = CsFile(vpnsecretfilte) secret.empty() - secret.addeq("%s : PSK \"%s\"" % (left, psk)) + secret.addeq(": PSK \"%s\"" % (psk)) secret.commit() xl2tpdconf = CsFile(xl2tpdconffile)