Merge branch '4.15' into main

nicolas 2021-06-16 15:38:18 -03:00
commit d2ab350a40
15 changed files with 3450 additions and 29 deletions


@@ -2409,6 +2409,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
volume.setPath(result.getPath());
volume.setPoolId(pool.getId());
if (result.getChainInfo() != null) {
volume.setChainInfo(result.getChainInfo());
}
_volsDao.update(volume.getId(), volume);
}
}


@@ -224,7 +224,7 @@ public class BridgeVifDriver extends VifDriverBase {
String brName = createVnetBr(vNetId, trafficLabel, protocol);
intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps);
} else {
String brName = createVnetBr(vNetId, "private", protocol);
String brName = createVnetBr(vNetId, _bridges.get("private"), protocol);
intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps);
}
} else {
@@ -291,7 +291,7 @@ public class BridgeVifDriver extends VifDriverBase {
private String createVnetBr(String vNetId, String pifKey, String protocol) throws InternalErrorException {
String nic = _pifs.get(pifKey);
if (nic == null) {
if (nic == null || protocol.equals(Networks.BroadcastDomainType.Vxlan.scheme())) {
// if not found in bridge map, maybe traffic label refers to pif already?
File pif = new File("/sys/class/net/" + pifKey);
if (pif.isDirectory()) {


@@ -161,7 +161,7 @@ public class IvsVifDriver extends VifDriverBase {
private String createVnetBr(String vNetId, String pifKey, String protocol) throws InternalErrorException {
String nic = _pifs.get(pifKey);
if (nic == null) {
if (nic == null || protocol.equals(Networks.BroadcastDomainType.Vxlan.scheme())) {
// if not found in bridge map, maybe traffic label refers to pif already?
File pif = new File("/sys/class/net/" + pifKey);
if (pif.isDirectory()) {
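
The same guard is added in both BridgeVifDriver and IvsVifDriver. A minimal standalone sketch of the lookup order it implements, with simplified stand-ins for the CloudStack types (Networks.BroadcastDomainType.Vxlan.scheme() reduced to a plain "vxlan" constant, InternalErrorException to an unchecked exception):

import java.io.File;
import java.util.Map;

class PifResolver {
    static final String VXLAN = "vxlan";

    // For VXLAN traffic, or when the key is missing from the pif map, the traffic
    // label is treated as a physical interface name under /sys/class/net directly.
    static String resolvePif(Map<String, String> pifs, String pifKey, String protocol) {
        String nic = pifs.get(pifKey);
        if (nic == null || VXLAN.equals(protocol)) {
            File dev = new File("/sys/class/net/" + pifKey);
            if (dev.isDirectory()) {
                return pifKey;
            }
            throw new IllegalArgumentException("No physical interface found for " + pifKey);
        }
        return nic;
    }
}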


@@ -4865,7 +4865,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
}
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
String chainInfo = _gson.toJson(diskInfoBuilder.getDiskInfoByBackingFileBaseName(volumePath, poolTo.getUuid().replace("-", "")));
String chainInfo = _gson.toJson(diskInfoBuilder.getDiskInfoByBackingFileBaseName(volumePath, targetDsMo.getName()));
MigrateVolumeAnswer answer = new MigrateVolumeAnswer(cmd, true, null, volumePath);
answer.setVolumeChainInfo(chainInfo);
return answer;
@@ -7501,8 +7501,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
VolumeObjectTO newVol = new VolumeObjectTO();
newVol.setDataStoreUuid(entry.second().getUuid());
String newPath = vmMo.getVmdkFileBaseName(disk);
String poolName = entry.second().getUuid().replace("-", "");
VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolName);
ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(targetHyperHost, entry.second().getUuid());
DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs);
VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, dsMo.getName());
newVol.setId(volumeId);
newVol.setPath(newPath);
newVol.setChainInfo(_gson.toJson(diskInfo));


@@ -1817,7 +1817,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
} else {
// This is to find datastores which are removed from datastore cluster.
// The final set childDatastoreUUIDs contains the UUIDs of child datastores which needs to be removed from datastore cluster
childDatastoreUUIDs.remove(childStoragePoolInfo.getUuid());
childDatastoreUUIDs.remove(dataStoreVO.getUuid());
}
} else {
dataStoreVO = createChildDatastoreVO(datastoreClusterPool, childDataStoreAnswer);
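
A minimal sketch of the reconciliation described in the comment above, with plain strings standing in for the CloudStack DAO objects. The point of the fix is that the UUID removed from the working set must be the one stored in the database (dataStoreVO.getUuid()), which can differ from the UUID reported for the child datastore by the hypervisor:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class DatastoreClusterSync {
    // dbChildUuids: database UUIDs of the child datastores currently recorded for the cluster.
    // reportedToDbUuid: each child datastore reported by the hypervisor, mapped to its database UUID.
    // Returns the database UUIDs of children that are no longer part of the datastore cluster.
    static Set<String> childDatastoresToRemove(Set<String> dbChildUuids, Map<String, String> reportedToDbUuid) {
        Set<String> toRemove = new HashSet<>(dbChildUuids);
        for (String dbUuid : reportedToDbUuid.values()) {
            toRemove.remove(dbUuid); // still present in the cluster, so keep it
        }
        return toRemove;
    }
}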


@@ -31,6 +31,8 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import com.cloud.api.query.dao.ServiceOfferingJoinDao;
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd;
@@ -227,6 +229,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@Inject
private ServiceOfferingDetailsDao _serviceOfferingDetailsDao;
@Inject
private ServiceOfferingJoinDao serviceOfferingJoinDao;
@Inject
private UserVmDao _userVmDao;
@Inject
private UserVmDetailsDao userVmDetailsDao;
@@ -966,20 +970,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
// if we are to use the existing disk offering
ImageFormat format = null;
if (newDiskOffering == null) {
Long templateId = volume.getTemplateId();
if (templateId != null) {
VMTemplateVO template = _templateDao.findById(templateId);
format = template.getFormat();
}
if (volume.getVolumeType().equals(Volume.Type.ROOT) && diskOffering.getDiskSize() > 0 && format != null && format != ImageFormat.ISO) {
throw new InvalidParameterValueException(
"Failed to resize Root volume. The service offering of this Volume has been configured with a root disk size; "
+ "on such case a Root Volume can only be resized when changing to another Service Offering with a Root disk size. "
+ "For more details please check out the Official Resizing Volumes documentation.");
}
newSize = cmd.getSize();
newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
@@ -990,6 +981,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
+ "customizable or it must be a root volume (if providing a disk offering, make sure it is different from the current disk offering).");
}
if (isNotPossibleToResize(volume, diskOffering)) {
throw new InvalidParameterValueException(
"Failed to resize Root volume. The service offering of this Volume has been configured with a root disk size; "
+ "on such case a Root Volume can only be resized when changing to another Service Offering with a Root disk size. "
+ "For more details please check out the Official Resizing Volumes documentation.");
}
// convert from bytes to GiB
newSize = newSize << 30;
} else {
@@ -1213,6 +1211,27 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
shrinkOk);
}
/**
* A volume should not be resized if it covers ALL the following scenarios: <br>
* 1 - Root volume <br>
* 2 - && Current Disk Offering enforces a root disk size (in this case one can resize only by changing the Service Offering)
*/
protected boolean isNotPossibleToResize(VolumeVO volume, DiskOfferingVO diskOffering) {
Long templateId = volume.getTemplateId();
ImageFormat format = null;
if (templateId != null) {
VMTemplateVO template = _templateDao.findByIdIncludingRemoved(templateId);
format = template.getFormat();
}
boolean isNotIso = format != null && format != ImageFormat.ISO;
boolean isRoot = Volume.Type.ROOT.equals(volume.getVolumeType());
ServiceOfferingJoinVO serviceOfferingView = serviceOfferingJoinDao.findById(diskOffering.getId());
boolean isOfferingEnforcingRootDiskSize = serviceOfferingView != null && serviceOfferingView.getRootDiskSize() > 0;
return isOfferingEnforcingRootDiskSize && isRoot && isNotIso;
}
private void checkIfVolumeIsRootAndVmIsRunning(Long newSize, VolumeVO volume, VMInstanceVO vmInstanceVO) {
if (!volume.getSize().equals(newSize) && volume.getVolumeType().equals(Volume.Type.ROOT) && !State.Stopped.equals(vmInstanceVO.getState())) {
throw new InvalidParameterValueException(String.format("Cannot resize ROOT volume [%s] when VM is not on Stopped State. VM %s is in state %s", volume.getName(), vmInstanceVO
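
A self-contained restatement of the rule the new isNotPossibleToResize helper encodes, using plain values in place of VolumeVO, DiskOfferingVO and ServiceOfferingJoinVO: a direct resize is rejected only when the volume is a ROOT volume, its template format is not ISO, and the service offering it was created from enforces a fixed root disk size (in which case the volume can only be resized by changing to another service offering):

class ResizeGuard {
    enum VolumeType { ROOT, DATADISK }
    enum ImageFormat { QCOW2, RAW, VHD, OVA, ISO }

    static boolean isNotPossibleToResize(VolumeType volumeType, ImageFormat templateFormat, long offeringRootDiskSize) {
        boolean isRoot = volumeType == VolumeType.ROOT;
        boolean isNotIso = templateFormat != null && templateFormat != ImageFormat.ISO;
        boolean offeringEnforcesRootDiskSize = offeringRootDiskSize > 0;
        // All three conditions must hold for the resize to be blocked.
        return isRoot && isNotIso && offeringEnforcesRootDiskSize;
    }
}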


@@ -1754,7 +1754,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
throw new InvalidParameterValueException("Allocating ip to guest nic " + nicVO.getUuid() + " failed, please choose another ip");
}
final IPAddressVO newIp = _ipAddressDao.findByIpAndDcId(dc.getId(), ipaddr);
final IPAddressVO newIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), ipaddr);
final Vlan vlan = _vlanDao.findById(newIp.getVlanId());
nicVO.setIPv4Gateway(vlan.getVlanGateway());
nicVO.setIPv4Netmask(vlan.getVlanNetmask());


@@ -35,6 +35,9 @@ import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import com.cloud.api.query.dao.ServiceOfferingJoinDao;
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
import com.cloud.storage.dao.VMTemplateDao;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd;
@@ -78,7 +81,6 @@ import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
import com.cloud.org.Grouping;
import com.cloud.serializer.GsonHelper;
import com.cloud.server.TaggedResourceService;
@@ -153,7 +155,9 @@ public class VolumeApiServiceImplTest {
@Mock
private StoragePoolTagsDao storagePoolTagsDao;
@Mock
private HypervisorCapabilitiesDao hypervisorCapabilitiesDao;
private VMTemplateDao templateDao;
@Mock
private ServiceOfferingJoinDao serviceOfferingJoinDao;
private DetachVolumeCmd detachCmd = new DetachVolumeCmd();
private Class<?> _detachCmdClass = detachCmd.getClass();
@@ -1079,4 +1083,52 @@ public class VolumeApiServiceImplTest {
Assert.assertTrue(result);
}
@Test
public void isNotPossibleToResizeTestAllFormats() {
Storage.ImageFormat[] imageFormat = Storage.ImageFormat.values();
for (int i = 0; i < imageFormat.length - 1; i++) {
if (imageFormat[i] != Storage.ImageFormat.ISO) {
prepareAndRunTestOfIsNotPossibleToResize(Type.ROOT, 10l, imageFormat[i], true);
} else {
prepareAndRunTestOfIsNotPossibleToResize(Type.ROOT, 10l, imageFormat[i], false);
}
}
}
@Test
public void isNotPossibleToResizeTestAllTypes() {
Type[] types = Type.values();
for (int i = 0; i < types.length - 1; i++) {
if (types[i] != Type.ROOT) {
prepareAndRunTestOfIsNotPossibleToResize(types[i], 10l, Storage.ImageFormat.QCOW2, false);
} else {
prepareAndRunTestOfIsNotPossibleToResize(types[i], 10l, Storage.ImageFormat.QCOW2, true);
}
}
}
@Test
public void isNotPossibleToResizeTestNoRootDiskSize() {
prepareAndRunTestOfIsNotPossibleToResize(Type.ROOT, 0l, Storage.ImageFormat.QCOW2, false);
}
private void prepareAndRunTestOfIsNotPossibleToResize(Type volumeType, Long rootDisk, Storage.ImageFormat imageFormat, boolean expectedIsNotPossibleToResize) {
VolumeVO volume = Mockito.mock(VolumeVO.class);
when(volume.getVolumeType()).thenReturn(volumeType);
when(volume.getTemplateId()).thenReturn(1l);
DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class);
ServiceOfferingJoinVO serviceOfferingJoinVO = Mockito.mock(ServiceOfferingJoinVO.class);
when(serviceOfferingJoinVO.getRootDiskSize()).thenReturn(rootDisk);
when(serviceOfferingJoinDao.findById(Mockito.anyLong())).thenReturn(serviceOfferingJoinVO);
VMTemplateVO template = Mockito.mock(VMTemplateVO.class);
when(template.getFormat()).thenReturn(imageFormat);
when(templateDao.findByIdIncludingRemoved(Mockito.anyLong())).thenReturn(template);
boolean result = volumeApiServiceImpl.isNotPossibleToResize(volume, diskOffering);
Assert.assertEquals(expectedIsNotPossibleToResize, result);
}
}


@@ -604,6 +604,7 @@ public class UserVmManagerTest {
NicVO nic = new NicVO("nic", 1L, 2L, VirtualMachine.Type.User);
when(_nicDao.findById(anyLong())).thenReturn(nic);
nic.setIPv4Address("10.10.10.9");
when(_vmDao.findById(anyLong())).thenReturn(_vmMock);
when(_networkDao.findById(anyLong())).thenReturn(_networkMock);
doReturn(9L).when(_networkMock).getNetworkOfferingId();
@@ -630,9 +631,9 @@ public class UserVmManagerTest {
when(vlan.getVlanNetmask()).thenReturn("255.255.255.0");
when(_ipAddrMgr.allocatePublicIpForGuestNic(Mockito.eq(_networkMock), nullable(Long.class), Mockito.eq(_accountMock), anyString())).thenReturn("10.10.10.10");
lenient().when(_ipAddressDao.findByIpAndSourceNetworkId(anyLong(), anyString())).thenReturn(null);
when(_ipAddressDao.findByIpAndSourceNetworkId(anyLong(), eq("10.10.10.10"))).thenReturn(newIp);
when(_ipAddressDao.findByIpAndSourceNetworkId(anyLong(), eq("10.10.10.9"))).thenReturn(null);
when(_nicDao.persist(any(NicVO.class))).thenReturn(nic);
when(_ipAddressDao.findByIpAndDcId(anyLong(), anyString())).thenReturn(newIp);
when(_vlanDao.findById(anyLong())).thenReturn(vlan);
Account caller = new AccountVO("testaccount", 1, "networkdomain", (short)0, UUID.randomUUID().toString());


@@ -78,7 +78,7 @@ def parse_reports(file_path_list):
exit_code = 0
for file_path in file_path_list:
data = lxml.etree.iterparse(file_path, tag='testcase')
data = lxml.etree.iterparse(file_path, tag='testcase', huge_tree=True)
for event, elem in data:
name = ''
status = 'Success'

ui/public/locales/el_GR.json (new file, 3332 lines)

File diff suppressed because it is too large.


@@ -2076,6 +2076,7 @@
"label.supportsstrechedl2subnet": "Supports Streched L2 Subnet",
"label.suspend.project": "Suspend Project",
"label.switch.type": "Switch Type",
"label.sync.storage": "Sync Storage Pool",
"label.system.capacity": "System Capacity",
"label.system.offering": "System Offering",
"label.system.offering.for.router": "System Offering for Router",
@@ -2640,6 +2641,7 @@
"message.confirm.start.lb.vm": "Please confirm you want to start LB VM",
"message.confirm.stop.kubernetes.cluster": "Please confirm that you want to stop this Kubernetes cluster.",
"message.confirm.stop.lb.vm": "Please confirm you want to stop LB VM",
"message.confirm.sync.storage": "Please confirm you want to sync the storage pool",
"message.confirm.upgrade.router.newer.template": "Please confirm that you want to upgrade router to use newer template",
"message.confirm.upgrade.routers.account.newtemplate": "Please confirm that you want to upgrade all routers in this account to use newer template",
"message.confirm.upgrade.routers.cluster.newtemplate": "Please confirm that you want to upgrade all routers in this cluster to use newer template",
@@ -2777,7 +2779,7 @@
"message.error.custom.disk.size": "Please enter custom disk size",
"message.error.date": "Please select a date",
"message.error.description": "Please enter description",
"message.error.discovering.feature": "Exception caught while discoverying features",
"message.error.discovering.feature": "Exception caught while discovering features",
"message.error.display.text": "Please enter display text",
"message.error.domain": "Enter your domain, leave empty for ROOT domain",
"message.error.enable.saml": "Unable to find users IDs to enable SAML Single Sign On, kindly enable it manually.",


@@ -41,6 +41,7 @@
<a-menu-item key="pl" value="plPL">Polish</a-menu-item>
<a-menu-item key="pt_BR" value="ptBR">Português brasileiro</a-menu-item>
<a-menu-item key="ru_RU" value="ruRU">Русский</a-menu-item>
<a-menu-item key="el_GR" value="elGR">Ελληνικά</a-menu-item>
</a-menu>
</a-dropdown>
</template>


@@ -81,6 +81,14 @@ export default {
defaultArgs: { enabled: true },
show: (record) => { return record.state === 'Disabled' }
},
{
api: 'syncStoragePool',
icon: 'sync',
label: 'label.sync.storage',
message: 'message.confirm.sync.storage',
dataView: true,
show: (record) => { return record.state === 'Up' && record.type === 'DatastoreCluster' }
},
{
api: 'enableStorageMaintenance',
icon: 'plus-square',


@@ -175,7 +175,8 @@
}
]
}]"
:placeholder="apiParams.format.description">
:placeholder="apiParams.format.description"
@change="val => { selectedFormat = val }">
<a-select-option v-for="opt in format.opts" :key="opt.id">
{{ opt.name || opt.description }}
</a-select-option>
@@ -209,7 +210,7 @@
</a-form-item>
</a-row>
<a-form-item :label="$t('label.deployasis')" v-if="hyperVMWShow">
<a-form-item :label="$t('label.deployasis')" v-if="selectedFormat === 'OVA'">
<a-switch
v-decorator="['deployasis', {
initialValue: false,
@@ -388,6 +389,7 @@ export default {
hyperKVMShow: false,
hyperXenServerShow: false,
hyperVMWShow: false,
selectedFormat: '',
deployasis: false,
zoneError: '',
zoneErrorMessage: '',