Mirror of https://github.com/apache/cloudstack.git, synced 2025-10-26 08:42:29 +01:00
Merge branch '4.19'
This commit is contained in: commit e61f3bae4d

.github/workflows/ci.yml (vendored), 1 line changed
@@ -54,6 +54,7 @@ jobs:
smoke/test_deploy_vm_with_userdata
smoke/test_deploy_vms_in_parallel
smoke/test_deploy_vms_with_varied_deploymentplanners
smoke/test_restore_vm
smoke/test_diagnostics
smoke/test_direct_download
smoke/test_disk_offerings
@@ -85,7 +85,7 @@ public interface VolumeOrchestrationService {
VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType)
throws ConcurrentOperationException, StorageUnavailableException;

Volume allocateDuplicateVolume(Volume oldVol, Long templateId);
Volume allocateDuplicateVolume(Volume oldVol, DiskOffering diskOffering, Long templateId);

boolean volumeOnSharedStoragePool(Volume volume);
@@ -306,11 +306,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}

@Override
public Volume allocateDuplicateVolume(Volume oldVol, Long templateId) {
return allocateDuplicateVolumeVO(oldVol, templateId);
public Volume allocateDuplicateVolume(Volume oldVol, DiskOffering diskOffering, Long templateId) {
return allocateDuplicateVolumeVO(oldVol, diskOffering, templateId);
}

public VolumeVO allocateDuplicateVolumeVO(Volume oldVol, Long templateId) {
public VolumeVO allocateDuplicateVolumeVO(Volume oldVol, DiskOffering diskOffering, Long templateId) {
VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(),
oldVol.getProvisioningType(), oldVol.getSize(), oldVol.getMinIops(), oldVol.getMaxIops(), oldVol.get_iScsiName());
if (templateId != null) {
@@ -322,8 +322,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
newVol.setInstanceId(oldVol.getInstanceId());
newVol.setRecreatable(oldVol.isRecreatable());
newVol.setFormat(oldVol.getFormat());

if (oldVol.getPassphraseId() != null) {
if ((diskOffering == null || diskOffering.getEncrypt()) && oldVol.getPassphraseId() != null) {
PassphraseVO passphrase = passphraseDao.persist(new PassphraseVO(true));
newVol.setPassphraseId(passphrase.getId());
}
@@ -1170,7 +1169,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
return Transaction.execute(new TransactionCallback<VolumeVO>() {
@Override
public VolumeVO doInTransaction(TransactionStatus status) {
VolumeVO newVolume = allocateDuplicateVolumeVO(existingVolume, templateIdToUseFinal);
VolumeVO newVolume = allocateDuplicateVolumeVO(existingVolume, null, templateIdToUseFinal);
try {
stateTransitTo(existingVolume, Volume.Event.DestroyRequested);
} catch (NoTransitionException e) {
@@ -483,19 +483,19 @@ public class SystemVmTemplateRegistration {
templateZoneVO = vmTemplateZoneDao.persist(templateZoneVO);
} else {
templateZoneVO.setLastUpdated(new java.util.Date());
if (vmTemplateZoneDao.update(templateZoneVO.getId(), templateZoneVO)) {
if (!vmTemplateZoneDao.update(templateZoneVO.getId(), templateZoneVO)) {
templateZoneVO = null;
}
}
return templateZoneVO;
}

private void createCrossZonesTemplateZoneRefEntries(VMTemplateVO template) {
private void createCrossZonesTemplateZoneRefEntries(Long templateId) {
List<DataCenterVO> dcs = dataCenterDao.listAll();
for (DataCenterVO dc : dcs) {
VMTemplateZoneVO templateZoneVO = createOrUpdateTemplateZoneEntry(dc.getId(), template.getId());
VMTemplateZoneVO templateZoneVO = createOrUpdateTemplateZoneEntry(dc.getId(), templateId);
if (templateZoneVO == null) {
throw new CloudRuntimeException(String.format("Failed to create template_zone_ref record for the systemVM template for hypervisor: %s and zone: %s", template.getHypervisorType().name(), dc));
throw new CloudRuntimeException(String.format("Failed to create template_zone_ref record for the systemVM template (id: %s) and zone: %s", templateId, dc));
}
}
}
@@ -625,8 +625,9 @@ public class SystemVmTemplateRegistration {
throw new CloudRuntimeException(String.format("Failed to register template for hypervisor: %s", hypervisor.name()));
}
templateId = template.getId();
createCrossZonesTemplateZoneRefEntries(template);
}
createCrossZonesTemplateZoneRefEntries(templateId);

details.setId(templateId);
String destTempFolderName = String.valueOf(templateId);
String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + destTempFolderName;
@@ -1240,7 +1240,7 @@ public class VolumeServiceImpl implements VolumeService {

volumeInfo.processEvent(Event.DestroyRequested);

Volume newVol = _volumeMgr.allocateDuplicateVolume(volume, null);
Volume newVol = _volumeMgr.allocateDuplicateVolume(volume, null, null);
VolumeVO newVolume = (VolumeVO) newVol;
newVolume.set_iScsiName(null);
volDao.update(newVolume.getId(), newVolume);
@@ -1008,6 +1008,10 @@ public class LibvirtVMDef {
return _diskLabel;
}

public void setDiskLabel(String label) {
_diskLabel = label;
}

public DiskType getDiskType() {
return _diskType;
}
@@ -18,13 +18,20 @@
*/
package com.cloud.hypervisor.kvm.resource;

import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.Callable;

import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.LibvirtException;
import org.libvirt.TypedParameter;
import org.libvirt.TypedStringParameter;
import org.libvirt.TypedUlongParameter;

public class MigrateKVMAsync implements Callable<Domain> {
protected Logger logger = Logger.getLogger(getClass());

private final LibvirtComputingResource libvirtComputingResource;

@@ -37,6 +44,8 @@ public class MigrateKVMAsync implements Callable<Domain> {
private boolean migrateNonSharedInc;
private boolean autoConvergence;

protected Set<String> migrateDiskLabels;

// Libvirt Migrate Flags reference:
// https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainMigrateFlags

@@ -87,7 +96,7 @@ public class MigrateKVMAsync implements Callable<Domain> {
private static final int LIBVIRT_VERSION_SUPPORTS_AUTO_CONVERGE = 1002003;

public MigrateKVMAsync(final LibvirtComputingResource libvirtComputingResource, final Domain dm, final Connect dconn, final String dxml,
final boolean migrateStorage, final boolean migrateNonSharedInc, final boolean autoConvergence, final String vmName, final String destIp) {
final boolean migrateStorage, final boolean migrateNonSharedInc, final boolean autoConvergence, final String vmName, final String destIp, Set<String> migrateDiskLabels) {
this.libvirtComputingResource = libvirtComputingResource;

this.dm = dm;
@@ -98,6 +107,7 @@ public class MigrateKVMAsync implements Callable<Domain> {
this.autoConvergence = autoConvergence;
this.vmName = vmName;
this.destIp = destIp;
this.migrateDiskLabels = migrateDiskLabels;
}

@Override
@@ -121,6 +131,37 @@ public class MigrateKVMAsync implements Callable<Domain> {
flags |= VIR_MIGRATE_AUTO_CONVERGE;
}

return dm.migrate(dconn, flags, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed());
TypedParameter [] parameters = createTypedParameterList();

logger.debug(String.format("Migrating [%s] with flags [%s], destination [%s] and speed [%s]. The disks with the following labels will be migrated [%s].", vmName, flags,
destIp, libvirtComputingResource.getMigrateSpeed(), migrateDiskLabels));

return dm.migrate(dconn, parameters, flags);

}

protected TypedParameter[] createTypedParameterList() {
int sizeOfMigrateDiskLabels = 0;
if (migrateDiskLabels != null) {
sizeOfMigrateDiskLabels = migrateDiskLabels.size();
}

TypedParameter[] parameters = new TypedParameter[4 + sizeOfMigrateDiskLabels];
parameters[0] = new TypedStringParameter(Domain.DomainMigrateParameters.VIR_MIGRATE_PARAM_DEST_NAME, vmName);
parameters[1] = new TypedStringParameter(Domain.DomainMigrateParameters.VIR_MIGRATE_PARAM_DEST_XML, dxml);
parameters[2] = new TypedStringParameter(Domain.DomainMigrateParameters.VIR_MIGRATE_PARAM_URI, "tcp:" + destIp);
parameters[3] = new TypedUlongParameter(Domain.DomainMigrateParameters.VIR_MIGRATE_PARAM_BANDWIDTH, libvirtComputingResource.getMigrateSpeed());

if (sizeOfMigrateDiskLabels == 0) {
return parameters;
}

Iterator<String> iterator = migrateDiskLabels.iterator();
for (int i = 0; i < sizeOfMigrateDiskLabels; i++) {
parameters[4 + i] = new TypedStringParameter(Domain.DomainMigrateParameters.VIR_MIGRATE_PARAM_MIGRATE_DISKS, iterator.next());
}

return parameters;
}

}
@@ -24,6 +24,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -188,6 +189,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
// migrateStorage's value should always only be associated with the initial state of mapMigrateStorage.
final boolean migrateStorage = MapUtils.isNotEmpty(mapMigrateStorage);
final boolean migrateStorageManaged = command.isMigrateStorageManaged();
Set<String> migrateDiskLabels = null;

if (migrateStorage) {
if (logger.isDebugEnabled()) {
@@ -197,6 +199,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
if (logger.isDebugEnabled()) {
logger.debug(String.format("Changed VM [%s] XML configuration of used storage. New XML configuration is [%s].", vmName, xmlDesc));
}
migrateDiskLabels = getMigrateStorageDeviceLabels(disks, mapMigrateStorage);
}

Map<String, DpdkTO> dpdkPortsMapping = command.getDpdkInterfaceMapping();
@@ -225,7 +228,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo

final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc,
migrateStorage, migrateNonSharedInc,
command.isAutoConvergence(), vmName, command.getDestinationIp());
command.isAutoConvergence(), vmName, command.getDestinationIp(), migrateDiskLabels);
final Future<Domain> migrateThread = executor.submit(worker);
executor.shutdown();
long sleeptime = 0;
@@ -364,6 +367,30 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
return new MigrateAnswer(command, result == null, result, null);
}

/**
* Gets the disk labels (vda, vdb...) of the disks mapped for migration on mapMigrateStorage.
* @param diskDefinitions list of all the disksDefinitions of the VM.
* @param mapMigrateStorage map of the disks that should be migrated.
* @return set with the labels of the disks that should be migrated.
* */
protected Set<String> getMigrateStorageDeviceLabels(List<DiskDef> diskDefinitions, Map<String, MigrateCommand.MigrateDiskInfo> mapMigrateStorage) {
HashSet<String> setOfLabels = new HashSet<>();
logger.debug("Searching for disk labels of disks [{}].", mapMigrateStorage.keySet());
for (String fileName : mapMigrateStorage.keySet()) {
for (DiskDef diskDef : diskDefinitions) {
String diskPath = diskDef.getDiskPath();
if (diskPath != null && diskPath.contains(fileName)) {
setOfLabels.add(diskDef.getDiskLabel());
logger.debug("Found label [{}] for disk [{}].", diskDef.getDiskLabel(), fileName);
break;
}
}
}

return setOfLabels;
}

/**
* Checks if the CPU shares are equal in the source host and destination host.
* <ul>
@@ -0,0 +1,83 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//

package com.cloud.hypervisor.kvm.resource;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.TypedParameter;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;

import java.util.Set;

@RunWith(MockitoJUnitRunner.class)
public class MigrateKVMAsyncTest {

@Mock
private LibvirtComputingResource libvirtComputingResource;
@Mock
private Connect connect;
@Mock
private Domain domain;

@Test
public void createTypedParameterListTestNoMigrateDiskLabels() {
MigrateKVMAsync migrateKVMAsync = new MigrateKVMAsync(libvirtComputingResource, domain, connect, "testxml",
false, false, false, "tst", "1.1.1.1", null);

Mockito.doReturn(10).when(libvirtComputingResource).getMigrateSpeed();

TypedParameter[] result = migrateKVMAsync.createTypedParameterList();

Assert.assertEquals(4, result.length);

Assert.assertEquals("tst", result[0].getValueAsString());
Assert.assertEquals("testxml", result[1].getValueAsString());
Assert.assertEquals("tcp:1.1.1.1", result[2].getValueAsString());
Assert.assertEquals("10", result[3].getValueAsString());

}

@Test
public void createTypedParameterListTestWithMigrateDiskLabels() {
Set<String> labels = Set.of("vda", "vdb");
MigrateKVMAsync migrateKVMAsync = new MigrateKVMAsync(libvirtComputingResource, domain, connect, "testxml",
false, false, false, "tst", "1.1.1.1", labels);

Mockito.doReturn(10).when(libvirtComputingResource).getMigrateSpeed();

TypedParameter[] result = migrateKVMAsync.createTypedParameterList();

Assert.assertEquals(6, result.length);

Assert.assertEquals("tst", result[0].getValueAsString());
Assert.assertEquals("testxml", result[1].getValueAsString());
Assert.assertEquals("tcp:1.1.1.1", result[2].getValueAsString());
Assert.assertEquals("10", result[3].getValueAsString());

Assert.assertEquals(labels, Set.of(result[4].getValueAsString(), result[5].getValueAsString()));
}

}
@@ -26,9 +26,11 @@ import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
@@ -574,6 +576,14 @@ public class LibvirtMigrateCommandWrapperTest {
" </devices>\n" +
"</domain>\n";

private Map<String, MigrateDiskInfo> createMapMigrateStorage(String sourceText, String path) {
Map<String, MigrateDiskInfo> mapMigrateStorage = new HashMap<String, MigrateDiskInfo>();

MigrateDiskInfo diskInfo = new MigrateDiskInfo("123456", DiskType.BLOCK, DriverType.RAW, Source.FILE, sourceText);
mapMigrateStorage.put(path, diskInfo);
return mapMigrateStorage;
}

@Test
public void testReplaceIpForVNCInDescFile() {
final String targetIp = "192.168.22.21";
@@ -750,10 +760,8 @@ public class LibvirtMigrateCommandWrapperTest {

@Test
public void testReplaceStorage() throws Exception {
Map<String, MigrateDiskInfo> mapMigrateStorage = new HashMap<String, MigrateDiskInfo>();
Map<String, MigrateDiskInfo> mapMigrateStorage = createMapMigrateStorage("sourceTest", "/mnt/812ea6a3-7ad0-30f4-9cab-01e3f2985b98/4650a2f7-fce5-48e2-beaa-bcdf063194e6");

MigrateDiskInfo diskInfo = new MigrateDiskInfo("123456", DiskType.BLOCK, DriverType.RAW, Source.FILE, "sourctest");
mapMigrateStorage.put("/mnt/812ea6a3-7ad0-30f4-9cab-01e3f2985b98/4650a2f7-fce5-48e2-beaa-bcdf063194e6", diskInfo);
final String result = libvirtMigrateCmdWrapper.replaceStorage(fullfile, mapMigrateStorage, true);

InputStream in = IOUtils.toInputStream(result, "UTF-8");
@@ -767,7 +775,6 @@ public class LibvirtMigrateCommandWrapperTest {

@Test
public void testReplaceStorageWithSecrets() throws Exception {
Map<String, MigrateDiskInfo> mapMigrateStorage = new HashMap<String, MigrateDiskInfo>();

final String xmlDesc =
"<domain type='kvm' id='3'>" +
@@ -788,8 +795,7 @@ public class LibvirtMigrateCommandWrapperTest {

final String volumeFile = "3530f749-82fd-458e-9485-a357e6e541db";
String newDiskPath = "/mnt/2d0435e1-99e0-4f1d-94c0-bee1f6f8b99e/" + volumeFile;
MigrateDiskInfo diskInfo = new MigrateDiskInfo("123456", DiskType.BLOCK, DriverType.RAW, Source.FILE, newDiskPath);
mapMigrateStorage.put("/mnt/07eb495b-5590-3877-9fb7-23c6e9a40d40/bf8621b3-027c-497d-963b-06319650f048", diskInfo);
Map<String, MigrateDiskInfo> mapMigrateStorage = createMapMigrateStorage(newDiskPath, "/mnt/07eb495b-5590-3877-9fb7-23c6e9a40d40/bf8621b3-027c-497d-963b-06319650f048");
final String result = libvirtMigrateCmdWrapper.replaceStorage(xmlDesc, mapMigrateStorage, false);
final String expectedSecretUuid = LibvirtComputingResource.generateSecretUUIDFromString(volumeFile);

@@ -941,4 +947,64 @@ public class LibvirtMigrateCommandWrapperTest {

Assert.assertEquals(updateShares, newVmCpuShares);
}

@Test
public void getMigrateStorageDeviceLabelsTestNoDiskDefinitions() {
Map<String, MigrateDiskInfo> mapMigrateStorage = createMapMigrateStorage("sourceTest", "/mnt/812ea6a3-7ad0-30f4-9cab-01e3f2985b98/4650a2f7-fce5-48e2-beaa-bcdf063194e6");

Set<String> result = libvirtMigrateCmdWrapper.getMigrateStorageDeviceLabels(new ArrayList<>(), mapMigrateStorage);

assertTrue(result.isEmpty());
}

@Test
public void getMigrateStorageDeviceLabelsTestNoMapMigrateStorage() {
List<DiskDef> disks = new ArrayList<>();
DiskDef diskDef0 = new DiskDef();

diskDef0.setDiskPath("volPath");
disks.add(diskDef0);

Set<String> result = libvirtMigrateCmdWrapper.getMigrateStorageDeviceLabels(disks, new HashMap<>());

assertTrue(result.isEmpty());
}

@Test
public void getMigrateStorageDeviceLabelsTestPathIsNotFound() {
List<DiskDef> disks = new ArrayList<>();
DiskDef diskDef0 = new DiskDef();

diskDef0.setDiskPath("volPath");
disks.add(diskDef0);

Map<String, MigrateDiskInfo> mapMigrateStorage = createMapMigrateStorage("sourceTest", "/mnt/812ea6a3-7ad0-30f4-9cab-01e3f2985b98/4650a2f7-fce5-48e2-beaa-bcdf063194e6");

Set<String> result = libvirtMigrateCmdWrapper.getMigrateStorageDeviceLabels(disks, mapMigrateStorage);

assertTrue(result.isEmpty());
}

@Test
public void getMigrateStorageDeviceLabelsTestFindPathAndLabels() {
List<DiskDef> disks = new ArrayList<>();
DiskDef diskDef0 = new DiskDef();
DiskDef diskDef1 = new DiskDef();

diskDef0.setDiskPath("volPath1");
diskDef0.setDiskLabel("vda");
disks.add(diskDef0);

diskDef1.setDiskPath("volPath2");
diskDef1.setDiskLabel("vdb");
disks.add(diskDef1);

Map<String, MigrateDiskInfo> mapMigrateStorage = createMapMigrateStorage("sourceTest", "volPath1");
mapMigrateStorage.put("volPath2", new MigrateDiskInfo("123457", DiskType.BLOCK, DriverType.RAW, Source.FILE, "sourceText"));

Set<String> result = libvirtMigrateCmdWrapper.getMigrateStorageDeviceLabels(disks, mapMigrateStorage);

assertTrue(result.containsAll(Arrays.asList("vda", "vdb")));
}

}
@@ -35,6 +35,7 @@ import org.apache.logging.log4j.LogManager;
import org.libvirt.LibvirtException;

import com.linbit.linstor.api.ApiClient;
import com.linbit.linstor.api.ApiConsts;
import com.linbit.linstor.api.ApiException;
import com.linbit.linstor.api.Configuration;
import com.linbit.linstor.api.DevelopersApi;
@@ -81,6 +82,10 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
}
}

private void logLinstorAnswers(@Nonnull ApiCallRcList answers) {
answers.forEach(this::logLinstorAnswer);
}

private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
answers.forEach(this::logLinstorAnswer);
if (answers.hasError())
@@ -296,23 +301,90 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
return true;
}

private boolean tryDisconnectLinstor(String volumePath, KVMStoragePool pool)
{
if (volumePath == null) {
return false;
}

logger.debug("Linstor: Using storage pool: " + pool.getUuid());
final DevelopersApi api = getLinstorAPI(pool);

Optional<ResourceWithVolumes> optRsc;
try
{
List<ResourceWithVolumes> resources = api.viewResources(
Collections.singletonList(localNodeName),
null,
null,
null,
null,
null);

optRsc = getResourceByPath(resources, volumePath);
} catch (ApiException apiEx) {
// couldn't query linstor controller
logger.error(apiEx.getBestMessage());
return false;
}

if (optRsc.isPresent()) {
try {
Resource rsc = optRsc.get();

// if diskless resource remove it, in the worst case it will be transformed to a tiebreaker
if (rsc.getFlags() != null &&
rsc.getFlags().contains(ApiConsts.FLAG_DRBD_DISKLESS) &&
!rsc.getFlags().contains(ApiConsts.FLAG_TIE_BREAKER)) {
ApiCallRcList delAnswers = api.resourceDelete(rsc.getName(), localNodeName);
logLinstorAnswers(delAnswers);
}

// remove allow-two-primaries
ResourceDefinitionModify rdm = new ResourceDefinitionModify();
rdm.deleteProps(Collections.singletonList("DrbdOptions/Net/allow-two-primaries"));
ApiCallRcList answers = api.resourceDefinitionModify(rsc.getName(), rdm);
if (answers.hasError()) {
logger.error(
String.format("Failed to remove 'allow-two-primaries' on %s: %s",
rsc.getName(), LinstorUtil.getBestErrorMessage(answers)));
// do not fail here as removing allow-two-primaries property isn't fatal
}
} catch (ApiException apiEx) {
logger.error(apiEx.getBestMessage());
// do not fail here as removing allow-two-primaries property or deleting diskless isn't fatal
}

return true;
}

logger.warn("Linstor: Couldn't find resource for this path: " + volumePath);
return false;
}

@Override
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool)
{
logger.debug("Linstor: disconnectPhysicalDisk {}:{}", pool.getUuid(), volumePath);
if (MapStorageUuidToStoragePool.containsValue(pool)) {
return tryDisconnectLinstor(volumePath, pool);
}
return false;
}

@Override
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect)
{
// as of now this is only relevant for iscsi targets
logger.info("Linstor: disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) called?");
return false;
}

private Optional<ResourceWithVolumes> getResourceByPath(final List<ResourceWithVolumes> resources, String path) {
return resources.stream()
.filter(rsc -> rsc.getVolumes().stream()
.anyMatch(v -> v.getDevicePath().equals(path)))
.anyMatch(v -> path.equals(v.getDevicePath())))
.findFirst();
}

@@ -333,46 +405,8 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
logger.debug("Linstor: disconnectPhysicalDiskByPath " + localPath);
final KVMStoragePool pool = optFirstPool.get();

logger.debug("Linstor: Using storpool: " + pool.getUuid());
final DevelopersApi api = getLinstorAPI(pool);

Optional<ResourceWithVolumes> optRsc;
try {
List<ResourceWithVolumes> resources = api.viewResources(
Collections.singletonList(localNodeName),
null,
null,
null,
null,
null);

optRsc = getResourceByPath(resources, localPath);
} catch (ApiException apiEx) {
// couldn't query linstor controller
logger.error(apiEx.getBestMessage());
return false;
return tryDisconnectLinstor(localPath, pool);
}

if (optRsc.isPresent()) {
try {
Resource rsc = optRsc.get();
ResourceDefinitionModify rdm = new ResourceDefinitionModify();
rdm.deleteProps(Collections.singletonList("DrbdOptions/Net/allow-two-primaries"));
ApiCallRcList answers = api.resourceDefinitionModify(rsc.getName(), rdm);
if (answers.hasError()) {
logger.error(
String.format("Failed to remove 'allow-two-primaries' on %s: %s",
rsc.getName(), LinstorUtil.getBestErrorMessage(answers)));
// do not fail here as removing allow-two-primaries property isn't fatal
}
} catch(ApiException apiEx){
logger.error(apiEx.getBestMessage());
// do not fail here as removing allow-two-primaries property isn't fatal
return true;
}
}
}
logger.info("Linstor: Couldn't find resource for this path: {}", localPath);
return false;
}
@@ -3934,7 +3934,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
srvOffrDomainDetailSearch.entity().getName(), serviceOfferingSearch.entity().setString(ApiConstants.DOMAIN_ID));
}

List<String> hostTags = getHostTagsFromTemplateForServiceOfferingsListing(caller, templateId);
List<String> hostTags = new ArrayList<>();
if (currentVmOffering != null) {
hostTags.addAll(com.cloud.utils.StringUtils.csvTagsToList(currentVmOffering.getHostTag()));
}
@@ -4342,7 +4342,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
}

if (offering.getDefaultUse()) {
throw new InvalidParameterValueException("Default service offerings cannot be deleted");
throw new InvalidParameterValueException(String.format("The system service offering [%s] is marked for default use and cannot be deleted", offering.getDisplayText()));
}

final User user = _userDao.findById(userId);
@@ -7981,7 +7981,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir

for (VolumeVO root : rootVols) {
if ( !Volume.State.Allocated.equals(root.getState()) || newTemplateId != null ) {
_volumeService.validateDestroyVolume(root, caller, expunge, false);
_volumeService.validateDestroyVolume(root, caller, Volume.State.Allocated.equals(root.getState()) || expunge, false);
final UserVmVO userVm = vm;
Pair<UserVmVO, Volume> vmAndNewVol = Transaction.execute(new TransactionCallbackWithException<Pair<UserVmVO, Volume>, CloudRuntimeException>() {
@Override
@@ -7998,19 +7998,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
Volume newVol = null;
if (newTemplateId != null) {
if (isISO) {
newVol = volumeMgr.allocateDuplicateVolume(root, null);
newVol = volumeMgr.allocateDuplicateVolume(root, diskOffering, null);
userVm.setIsoId(newTemplateId);
userVm.setGuestOSId(template.getGuestOSId());
userVm.setTemplateId(newTemplateId);
} else {
newVol = volumeMgr.allocateDuplicateVolume(root, newTemplateId);
newVol = volumeMgr.allocateDuplicateVolume(root, diskOffering, newTemplateId);
userVm.setGuestOSId(template.getGuestOSId());
userVm.setTemplateId(newTemplateId);
}
// check and update VM if it can be dynamically scalable with the new template
updateVMDynamicallyScalabilityUsingTemplate(userVm, newTemplateId);
} else {
newVol = volumeMgr.allocateDuplicateVolume(root, null);
newVol = volumeMgr.allocateDuplicateVolume(root, diskOffering, null);
}

updateVolume(newVol, template, userVm, diskOffering, details);
@@ -8044,7 +8044,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir

// Detach, destroy and create the usage event for the old root volume.
_volsDao.detachVolume(root.getId());
_volumeService.destroyVolume(root.getId(), caller, expunge, false);
_volumeService.destroyVolume(root.getId(), caller, Volume.State.Allocated.equals(root.getState()) || expunge, false);

if (currentTemplate.getId() != template.getId() && VirtualMachine.Type.User.equals(vm.type) && !VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());
test/integration/smoke/test_restore_vm.py (new file), 108 lines
@@ -0,0 +1,108 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Scaling up Vm
"""
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (VirtualMachine, Volume, ServiceOffering, Template)
from marvin.lib.common import (get_zone, get_domain)
from nose.plugins.attrib import attr

_multiprocess_shared_ = True


class TestRestoreVM(cloudstackTestCase):

@classmethod
def setUpClass(cls):
testClient = super(TestRestoreVM, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()

# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.hypervisor = testClient.getHypervisorInfo()
cls.services['mode'] = cls.zone.networktype

cls.services["virtual_machine"]["zoneid"] = cls.zone.id

cls.service_offering = ServiceOffering.create(cls.apiclient, cls.services["service_offering"])

cls.template_t1 = Template.register(cls.apiclient, cls.services["test_templates"][
cls.hypervisor.lower() if cls.hypervisor.lower() != 'simulator' else 'xenserver'],
zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower())

cls.template_t2 = Template.register(cls.apiclient, cls.services["test_templates"][
cls.hypervisor.lower() if cls.hypervisor.lower() != 'simulator' else 'xenserver'],
zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower())

cls._cleanup = [cls.service_offering, cls.template_t1, cls.template_t2]

@classmethod
def tearDownClass(cls):
super(TestRestoreVM, cls).tearDownClass()
return

@attr(tags=["advanced", "basic"], required_hardware="false")
def test_01_restore_vm(self):
"""Test restore virtual machine
"""
# create a virtual machine
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id,
templateid=self.template_t1.id,
serviceofferingid=self.service_offering.id)
self._cleanup.append(virtual_machine)

root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0]
self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state")
self.assertEqual(root_vol.size, self.template_t1.size, "Size of volume and template should match")

virtual_machine.restore(self.apiclient, self.template_t2.id)
restored_vm = VirtualMachine.list(self.apiclient, id=virtual_machine.id)[0]
self.assertEqual(restored_vm.state, 'Running', "VM should be in a running state")
self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's template after restore is incorrect")
root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0]
self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state")
self.assertEqual(root_vol.size, self.template_t2.size, "Size of volume and template should match")

@attr(tags=["advanced", "basic"], required_hardware="false")
def test_02_restore_vm_allocated_root(self):
"""Test restore virtual machine with root disk in allocated state
"""
# create a virtual machine with allocated root disk by setting startvm=False
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id,
templateid=self.template_t1.id,
serviceofferingid=self.service_offering.id,
startvm=False)
self._cleanup.append(virtual_machine)
root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0]
self.assertEqual(root_vol.state, 'Allocated', "Volume should be in Allocated state")
self.assertEqual(root_vol.size, self.template_t1.size, "Size of volume and template should match")

virtual_machine.restore(self.apiclient, self.template_t2.id)
restored_vm = VirtualMachine.list(self.apiclient, id=virtual_machine.id)[0]
self.assertEqual(restored_vm.state, 'Stopped', "Check the state of VM")
self.assertEqual(restored_vm.templateid, self.template_t2.id, "Check the template of VM")

root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0]
self.assertEqual(root_vol.state, 'Allocated', "Volume should be in Allocated state")
self.assertEqual(root_vol.size, self.template_t2.size, "Size of volume and template should match")

virtual_machine.start(self.apiclient)
root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0]
self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state")
@@ -36,7 +36,7 @@
:items="templates"
:selected="tabKey"
:loading="loading.templates"
:preFillContent="resource.templateid"
:preFillContent="dataPrefill"
:key="templateKey"
@handle-search-filter="($event) => fetchAllTemplates($event)"
@update-template-iso="updateFieldValue"
@@ -61,7 +61,7 @@
:zoneId="resource.zoneId"
:value="diskOffering ? diskOffering.id : ''"
:loading="loading.diskOfferings"
:preFillContent="resource.diskofferingid"
:preFillContent="dataPrefill"
:isIsoSelected="false"
:isRootDiskOffering="true"
@on-selected-disk-size="onSelectDiskSize"
@@ -170,7 +170,11 @@ export default {
],
diskOffering: {},
diskOfferingCount: 0,
templateKey: 0
templateKey: 0,
dataPrefill: {
templateid: this.resource.templateid,
diskofferingid: this.resource.diskofferingid
}
}
},
beforeCreate () {
@@ -192,8 +196,10 @@ export default {
},
handleSubmit () {
const params = {
virtualmachineid: this.resource.id,
templateid: this.templateid
virtualmachineid: this.resource.id
}
if (this.templateid) {
params.templateid = this.templateid
}
if (this.overrideDiskOffering) {
params.diskofferingid = this.diskOffering.id
@@ -285,9 +291,11 @@ export default {
},
onSelectDiskSize (rowSelected) {
this.diskOffering = rowSelected
this.dataPrefill.diskofferingid = rowSelected.id
},
updateFieldValue (input, value) {
this[input] = value
this.dataPrefill[input] = value
}
}
}
@@ -19,26 +19,24 @@

package com.cloud.utils.net;

import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.Formatter;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.List;

/**
* This class retrieves the (first) MAC address for the machine is it is loaded on and stores it statically for retrieval.
* It can also be used for formatting MAC addresses.
* copied fnd addpeted rom the public domain utility from John Burkard.
**/
public class MacAddress {
protected static Logger LOGGER = LogManager.getLogger(MacAddress.class);

private long _addr = 0;

protected MacAddress() {
@@ -76,213 +74,52 @@ public class MacAddress {
return toString(":");
}

private static MacAddress s_address;
private static MacAddress macAddress;

static {
String macAddress = null;

Process p = null;
BufferedReader in = null;

String macString = null;
try {
String osname = System.getProperty("os.name");

if (osname.startsWith("Windows")) {
p = Runtime.getRuntime().exec(new String[] {"ipconfig", "/all"}, null);
} else if (osname.startsWith("Solaris") || osname.startsWith("SunOS")) {
// Solaris code must appear before the generic code
String hostName = MacAddress.getFirstLineOfCommand(new String[] {"uname", "-n"});
if (hostName != null) {
p = Runtime.getRuntime().exec(new String[] {"/usr/sbin/arp", hostName}, null);
final List<NetworkInterface> nics = Collections.list(NetworkInterface.getNetworkInterfaces());
Collections.reverse(nics);
for (final NetworkInterface nic : nics) {
final byte[] mac = nic.getHardwareAddress();
if (mac != null &&
!nic.isVirtual() &&
!nic.isLoopback() &&
!nic.getName().startsWith("br") &&
!nic.getName().startsWith("veth") &&
!nic.getName().startsWith("vnet")) {
StringBuilder macAddressBuilder = new StringBuilder();
for (byte b : mac) {
macAddressBuilder.append(String.format("%02X", b));
}
} else if (new File("/usr/sbin/lanscan").exists()) {
p = Runtime.getRuntime().exec(new String[] {"/usr/sbin/lanscan"}, null);
} else if (new File("/sbin/ifconfig").exists()) {
p = Runtime.getRuntime().exec(new String[] {"/sbin/ifconfig", "-a"}, null);
}

if (p != null) {
in = new BufferedReader(new InputStreamReader(p.getInputStream()), 128);
String l = null;
while ((l = in.readLine()) != null) {
macAddress = MacAddress.parse(l);
if (macAddress != null) {
short parsedShortMacAddress = MacAddress.parseShort(macAddress);
if (parsedShortMacAddress != 0xff && parsedShortMacAddress != 0x00)
macString = macAddressBuilder.toString();
break;
}
macAddress = null;
}
} catch (SocketException ignore) {
}

} catch (SecurityException ex) {
LOGGER.info("[ignored] security exception in static initializer of MacAddress", ex);
} catch (IOException ex) {
LOGGER.info("[ignored] io exception in static initializer of MacAddress");
} finally {
if (p != null) {
closeAutoCloseable(in, "closing init process input stream");
closeAutoCloseable(p.getErrorStream(), "closing init process error output stream");
closeAutoCloseable(p.getOutputStream(), "closing init process std output stream");
p.destroy();
}
}
long macAddressLong = 0;

long clockSeqAndNode = 0;

if (macAddress != null) {
if (macAddress.indexOf(':') != -1) {
clockSeqAndNode |= MacAddress.parseLong(macAddress);
} else if (macAddress.startsWith("0x")) {
clockSeqAndNode |= MacAddress.parseLong(macAddress.substring(2));
}
if (macString != null) {
macAddressLong = Long.parseLong(macString, 16);
} else {
try {
byte[] local = InetAddress.getLocalHost().getAddress();
clockSeqAndNode |= (local[0] << 24) & 0xFF000000L;
clockSeqAndNode |= (local[1] << 16) & 0xFF0000;
clockSeqAndNode |= (local[2] << 8) & 0xFF00;
clockSeqAndNode |= local[3] & 0xFF;
macAddressLong |= (local[0] << 24) & 0xFF000000L;
macAddressLong |= (local[1] << 16) & 0xFF0000;
macAddressLong |= (local[2] << 8) & 0xFF00;
macAddressLong |= local[3] & 0xFF;
} catch (UnknownHostException ex) {
clockSeqAndNode |= (long)(Math.random() * 0x7FFFFFFF);
macAddressLong |= (long)(Math.random() * 0x7FFFFFFF);
}
}

s_address = new MacAddress(clockSeqAndNode);
MacAddress.macAddress = new MacAddress(macAddressLong);
}

public static MacAddress getMacAddress() {
return s_address;
}

private static String getFirstLineOfCommand(String[] commands) throws IOException {

Process p = null;
BufferedReader reader = null;

try {
p = Runtime.getRuntime().exec(commands);
reader = new BufferedReader(new InputStreamReader(p.getInputStream()), 128);

return reader.readLine();
} finally {
if (p != null) {
closeAutoCloseable(reader, "closing process input stream");
closeAutoCloseable(p.getErrorStream(), "closing process error output stream");
closeAutoCloseable(p.getOutputStream(), "closing process std output stream");
p.destroy();
}
}

}

/**
* The MAC address parser attempts to find the following patterns:
* <ul>
* <li>.{1,2}:.{1,2}:.{1,2}:.{1,2}:.{1,2}:.{1,2}</li>
* <li>.{1,2}-.{1,2}-.{1,2}-.{1,2}-.{1,2}-.{1,2}</li>
* </ul>
*
* This is copied from the author below. The author encouraged copying
* it.
*
*/
static String parse(String in) {

// lanscan

int hexStart = in.indexOf("0x");
if (hexStart != -1) {
int hexEnd = in.indexOf(' ', hexStart);
if (hexEnd != -1) {
return in.substring(hexStart, hexEnd);
}
}

int octets = 0;
int lastIndex, old, end;

if (in.indexOf('-') > -1) {
in = in.replace('-', ':');
}

lastIndex = in.lastIndexOf(':');

if (lastIndex > in.length() - 2)
return null;

end = Math.min(in.length(), lastIndex + 3);

++octets;
old = lastIndex;
while (octets != 5 && lastIndex != -1 && lastIndex > 1) {
lastIndex = in.lastIndexOf(':', --lastIndex);
if (old - lastIndex == 3 || old - lastIndex == 2) {
++octets;
old = lastIndex;
}
}

if (octets == 5 && lastIndex > 1) {
return in.substring(lastIndex - 2, end).trim();
}
return null;
}

/**
* Parses a <code>long</code> from a hex encoded number. This method will skip
* all characters that are not 0-9 and a-f (the String is lower cased first).
* Returns 0 if the String does not contain any interesting characters.
*
* @param s the String to extract a <code>long</code> from, may not be <code>null</code>
* @return a <code>long</code>
* @throws NullPointerException if the String is <code>null</code>
*/
private static long parseLong(String s) throws NullPointerException {
s = s.toLowerCase();
long out = 0;
byte shifts = 0;
char c;
for (int i = 0; i < s.length() && shifts < 16; i++) {
c = s.charAt(i);
if ((c > 47) && (c < 58)) {
out <<= 4;
++shifts;
out |= c - 48;
} else if ((c > 96) && (c < 103)) {
++shifts;
out <<= 4;
out |= c - 87;
}
}
return out;
}

/**
* Parses a <code>short</code> from a hex encoded number. This method will skip
* all characters that are not 0-9 and a-f (the String is lower cased first).
* Returns 0 if the String does not contain any interesting characters.
*
* @param s the String to extract a <code>short</code> from, may not be <code>null</code>
* @return a <code>short</code>
* @throws NullPointerException if the String is <code>null</code>
*/
private static short parseShort(String s) throws NullPointerException {
s = s.toLowerCase();
short out = 0;
byte shifts = 0;
char c;
for (int i = 0; i < s.length() && shifts < 4; i++) {
c = s.charAt(i);
if ((c > 47) && (c < 58)) {
out <<= 4;
++shifts;
out |= c - 48;
} else if ((c > 96) && (c < 103)) {
++shifts;
out <<= 4;
out |= c - 87;
}
}
return out;
return macAddress;
}
}
@@ -41,14 +41,14 @@ public class MacAddressTest {
public final void testMacAddressToLong() throws Exception {
// TODO this test should fail this address is beyond the acceptable range for macaddresses
MacAddress mac = new MacAddress(Long.MAX_VALUE);
assertEquals(Long.MAX_VALUE,mac.toLong());
assertEquals(Long.MAX_VALUE, mac.toLong());
System.out.println(mac.toString());
}

// TODO public final void testToLong() throws Exception {
// TODO public final void testToByteArray() throws Exception {
// TODO public final void testToStringString() throws Exception {
// TODO public final void testToString() throws Exception {
// TODO public final void testGetMacAddress() throws Exception {
// TODO public final void testParse() throws Exception {
@Test
public final void testSpecificMacAddress() throws Exception {
// Test specific mac address 76:3F:76:EB:02:81
MacAddress mac = new MacAddress(130014950130305L);
assertEquals("76:3f:76:eb:02:81", mac.toString());
}
}