Mirror of https://github.com/apache/cloudstack.git (synced 2025-11-02 20:02:29 +01:00)

commit 56f4da6dce
Merge remote-tracking branch 'apache/4.15' into main
@@ -66,7 +66,7 @@ public interface TemplateDataStoreDao extends GenericDao<TemplateDataStoreVO, Lo

     List<TemplateDataStoreVO> listByTemplate(long templateId);

-    List<TemplateDataStoreVO> listByTemplateNotBypassed(long templateId);
+    List<TemplateDataStoreVO> listByTemplateNotBypassed(long templateId, Long... storeIds);

     TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId);
@@ -99,6 +99,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
         templateSearch.and("template_id", templateSearch.entity().getTemplateId(), SearchCriteria.Op.EQ);
         templateSearch.and("download_state", templateSearch.entity().getDownloadState(), SearchCriteria.Op.NEQ);
         templateSearch.and("destroyed", templateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ);
+        templateSearch.and("storeids", templateSearch.entity().getDataStoreId(), Op.IN);
         templateSearch.done();

         templateRoleSearch = createSearchBuilder();
@@ -421,11 +422,12 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
     }

     @Override
-    public List<TemplateDataStoreVO> listByTemplateNotBypassed(long templateId) {
+    public List<TemplateDataStoreVO> listByTemplateNotBypassed(long templateId, Long... storeIds) {
         SearchCriteria<TemplateDataStoreVO> sc = templateSearch.create();
         sc.setParameters("template_id", templateId);
         sc.setParameters("download_state", Status.BYPASSED);
         sc.setParameters("destroyed", false);
+        sc.setParameters("storeids", storeIds);
         return search(sc, null);
     }
@@ -16,6 +16,55 @@
 // under the License.
 package com.cloud.hypervisor.vmware.manager;

+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLDecoder;
+import java.rmi.RemoteException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd;
+import org.apache.cloudstack.api.command.admin.zone.ImportVsphereStoragePoliciesCmd;
+import org.apache.cloudstack.api.command.admin.zone.ListVmwareDcsCmd;
+import org.apache.cloudstack.api.command.admin.zone.ListVsphereStoragePoliciesCmd;
+import org.apache.cloudstack.api.command.admin.zone.ListVsphereStoragePolicyCompatiblePoolsCmd;
+import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd;
+import org.apache.cloudstack.api.command.admin.zone.UpdateVmwareDcCmd;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.framework.config.Configurable;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.framework.jobs.impl.AsyncJobManagerImpl;
+import org.apache.cloudstack.management.ManagementServerHost;
+import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.utils.identity.ManagementServerNode;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.log4j.Logger;
+
 import com.amazonaws.util.CollectionUtils;
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.Listener;
@@ -120,51 +169,6 @@ import com.google.common.base.Strings;
 import com.vmware.pbm.PbmProfile;
 import com.vmware.vim25.AboutInfo;
 import com.vmware.vim25.ManagedObjectReference;
-import org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd;
-import org.apache.cloudstack.api.command.admin.zone.ImportVsphereStoragePoliciesCmd;
-import org.apache.cloudstack.api.command.admin.zone.ListVmwareDcsCmd;
-import org.apache.cloudstack.api.command.admin.zone.ListVsphereStoragePoliciesCmd;
-import org.apache.cloudstack.api.command.admin.zone.ListVsphereStoragePolicyCompatiblePoolsCmd;
-import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd;
-import org.apache.cloudstack.api.command.admin.zone.UpdateVmwareDcCmd;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.Configurable;
-import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
-import org.apache.cloudstack.framework.jobs.impl.AsyncJobManagerImpl;
-import org.apache.cloudstack.management.ManagementServerHost;
-import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.cloudstack.utils.identity.ManagementServerNode;
-import org.apache.commons.collections.MapUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.log4j.Logger;
-
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.net.URLDecoder;
-import java.rmi.RemoteException;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.Executors;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;

 public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener, VmwareDatacenterService, Configurable {
     private static final Logger s_logger = Logger.getLogger(VmwareManagerImpl.class);
@@ -268,6 +272,24 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
         _storageMgr = new VmwareStorageManagerImpl(this);
     }

+    private boolean isSystemVmIsoCopyNeeded(File srcIso, File destIso) {
+        if (!destIso.exists()) {
+            return true;
+        }
+        boolean copyNeeded = false;
+        try {
+            String srcIsoMd5 = DigestUtils.md5Hex(new FileInputStream(srcIso));
+            String destIsoMd5 = DigestUtils.md5Hex(new FileInputStream(destIso));
+            copyNeeded = !StringUtils.equals(srcIsoMd5, destIsoMd5);
+            if (copyNeeded) {
+                s_logger.debug(String.format("MD5 checksum: %s for source ISO: %s is different from MD5 checksum: %s from destination ISO: %s", srcIsoMd5, srcIso.getAbsolutePath(), destIsoMd5, destIso.getAbsolutePath()));
+            }
+        } catch (IOException e) {
+            s_logger.debug(String.format("Unable to compare MD5 checksum for systemvm.iso at source: %s and destination: %s", srcIso.getAbsolutePath(), destIso.getAbsolutePath()), e);
+        }
+        return copyNeeded;
+    }
+
     @Override
     public String getConfigComponentName() {
         return VmwareManagerImpl.class.getSimpleName();
@@ -706,7 +728,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw

         File srcIso = getSystemVMPatchIsoFile();
         File destIso = new File(mountPoint + "/systemvm/" + getSystemVMIsoFileNameOnDatastore());
-        if (!destIso.exists()) {
+        if (isSystemVmIsoCopyNeeded(srcIso, destIso)) {
             s_logger.info("Inject SSH key pairs before copying systemvm.iso into secondary storage");
             _configServer.updateKeyPairs();
@@ -380,8 +380,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
             try {
                 if (workerVm != null) {
                     // detach volume and destroy worker vm
-                    workerVm.detachAllDisks();
-                    workerVm.destroy();
+                    workerVm.detachAllDisksAndDestroy();
                 }
             } catch (Throwable e) {
                 s_logger.warn("Failed to destroy worker VM: " + workerVMName);
@@ -670,8 +669,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {

         } finally {
             if (clonedVm != null) {
-                clonedVm.detachAllDisks();
-                clonedVm.destroy();
+                clonedVm.detachAllDisksAndDestroy();
             }

             vmMo.removeSnapshot(templateUniqueName, false);
@@ -923,8 +921,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
             clonedVm.detachAllDisks();
         } finally {
             if (clonedVm != null) {
-                clonedVm.detachAllDisks();
-                clonedVm.destroy();
+                clonedVm.detachAllDisksAndDestroy();
             }
         }
     }
@@ -980,8 +977,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
             }
         } finally {
             if (clonedVm != null) {
-                clonedVm.detachAllDisks();
-                clonedVm.destroy();
+                clonedVm.detachAllDisksAndDestroy();
             }
         }
     }
@@ -1037,8 +1033,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
             }
             if (workerVm != null) {
                 //detach volume and destroy worker vm
-                workerVm.detachAllDisks();
-                workerVm.destroy();
+                workerVm.detachAllDisksAndDestroy();
             }
         }
     }
@@ -5981,9 +5981,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                     if (recycle) {
                         s_logger.info("Recycle pending worker VM: " + vmMo.getName());

+                        vmMo.cancelPendingTasks();
                         vmMo.powerOff();
-                        vmMo.detachAllDisks();
-                        vmMo.destroy();
+                        vmMo.detachAllDisksAndDestroy();
                     }
                 }
             }
@@ -430,8 +430,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
                 virtualDeviceBackingInfo = backingInfo.getParent();
             }

-            vmMo.detachAllDisks();
-            vmMo.destroy();
+            vmMo.detachAllDisksAndDestroy();

             VmwareStorageLayoutHelper.moveVolumeToRootFolder(dcMo, backingFiles);
@@ -821,8 +820,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
                 if (volume.getDeviceId().equals(0L)) {
                     if (existingVm != null) {
                         s_logger.info("Found existing VM " + vmName + " before cloning from template, destroying it");
-                        existingVm.detachAllDisks();
-                        existingVm.destroy();
+                        existingVm.detachAllDisksAndDestroy();
                     }
                     s_logger.info("ROOT Volume from deploy-as-is template, cloning template");
                     cloneVMFromTemplate(hyperHost, template.getPath(), vmName, primaryStore.getUuid());
@@ -854,8 +852,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
                     s_logger.info("Destroy dummy VM after volume creation");
                     if (vmMo != null) {
                         s_logger.warn("Unable to destroy a null VM ManagedObjectReference");
-                        vmMo.detachAllDisks();
-                        vmMo.destroy();
+                        vmMo.detachAllDisksAndDestroy();
                     }
                 }
             } else {
@@ -930,8 +927,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
         String vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0);
         if (volume.getVolumeType() == Volume.Type.DATADISK) {
             s_logger.info("detach disks from volume-wrapper VM " + vmName);
-            vmMo.detachAllDisks();
-            vmMo.destroy();
+            vmMo.detachAllDisksAndDestroy();
         }
         return vmdkFileBaseName;
     }
@@ -968,11 +964,8 @@ public class VmwareStorageProcessor implements StorageProcessor {
             dsMo.moveDatastoreFile(vmwareLayoutFilePair[i], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[i], dcMo.getMor(), true);
         }

-        s_logger.info("detach disks from volume-wrapper VM " + vmdkName);
-        vmMo.detachAllDisks();
-
-        s_logger.info("destroy volume-wrapper VM " + vmdkName);
-        vmMo.destroy();
+        s_logger.info("detach disks from volume-wrapper VM and destroy" + vmdkName);
+        vmMo.detachAllDisksAndDestroy();

         String srcFile = dsMo.getDatastorePath(vmdkName, true);
@@ -1144,8 +1137,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
             }
             if (workerVm != null) {
                 //detach volume and destroy worker vm
-                workerVm.detachAllDisks();
-                workerVm.destroy();
+                workerVm.detachAllDisksAndDestroy();
             }
         }
     }
@@ -1282,8 +1274,7 @@ public class VmwareStorageProcessor implements StorageProcessor {

         } finally {
             if (clonedVm != null) {
-                clonedVm.detachAllDisks();
-                clonedVm.destroy();
+                clonedVm.detachAllDisksAndDestroy();
             }
         }
     }
@@ -1364,8 +1355,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
         } finally {
             try {
                 if (volume.getVmName() == null && workerVmMo != null) {
-                    workerVmMo.detachAllDisks();
-                    workerVmMo.destroy();
+                    workerVmMo.detachAllDisksAndDestroy();
                 }
             } catch (Throwable e) {
                 s_logger.error("Failed to destroy worker VM created for detached volume");
@@ -1675,8 +1665,7 @@ public class VmwareStorageProcessor implements StorageProcessor {

         workerVM.exportVm(installFullPath, exportName, false, false);

-        workerVM.detachAllDisks();
-        workerVM.destroy();
+        workerVM.detachAllDisksAndDestroy();
     }

     private String getTemplateVmdkName(String installFullPath, String exportName) {
@@ -1853,8 +1842,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
             return new Pair<>(diskDevice, disks);
         } finally {
             if (clonedVm != null) {
-                clonedVm.detachAllDisks();
-                clonedVm.destroy();
+                clonedVm.detachAllDisksAndDestroy();
             }
         }
     }
@@ -2023,8 +2011,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
             try {
                 if (workerVm != null) {
                     // detach volume and destroy worker vm
-                    workerVm.detachAllDisks();
-                    workerVm.destroy();
+                    workerVm.detachAllDisksAndDestroy();
                 }
             } catch (Throwable e) {
                 s_logger.warn("Failed to destroy worker VM: " + workerVMName);
@@ -2566,8 +2553,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
         } finally {
             s_logger.info("Destroy dummy VM after volume creation");
             if (vmMo != null) {
-                vmMo.detachAllDisks();
-                vmMo.destroy();
+                vmMo.detachAllDisksAndDestroy();
             }
         }
     }
@@ -3779,8 +3765,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
             return _storage.getSize(srcOVFFileName);
         } finally {
             if (clonedVm != null) {
-                clonedVm.detachAllDisks();
-                clonedVm.destroy();
+                clonedVm.detachAllDisksAndDestroy();
             }
         }
     }
@@ -48,5 +48,10 @@
       <artifactId>xen-api</artifactId>
       <version>${cs.xapi.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.maven</groupId>
+      <artifactId>maven-artifact</artifactId>
+      <version>3.6.3</version>
+    </dependency>
   </dependencies>
 </project>
@@ -31,7 +31,9 @@ import javax.naming.ConfigurationException;
 import javax.persistence.EntityExistsException;

 import org.apache.cloudstack.hypervisor.xenserver.XenserverConfigs;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
+import org.apache.maven.artifact.versioning.ComparableVersion;
 import org.apache.xmlrpc.XmlRpcException;

 import com.cloud.agent.AgentManager;
@@ -122,6 +124,16 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
     private String xenServerIsoName = TemplateManager.XS_TOOLS_ISO;
     private String xenServerIsoDisplayText = "XenServer Tools Installer ISO (xen-pv-drv-iso)";

+    public final static String MIN_UEFI_SUPPORTED_VERSION = "8.2";
+
+    public static boolean isUefiSupported(String hostProductVersion) {
+        if (StringUtils.isEmpty(hostProductVersion)) {
+            return false;
+        }
+        ComparableVersion version = new ComparableVersion(hostProductVersion);
+        return version.compareTo(new ComparableVersion(MIN_UEFI_SUPPORTED_VERSION)) >= 0;
+    }
+
     protected XcpServerDiscoverer() {
     }
@@ -309,6 +321,9 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
         details.put("username", username);
         params.put("username", username);
         details.put("password", password);
+        if (isUefiSupported(prodVersion)) {
+            details.put(com.cloud.host.Host.HOST_UEFI_ENABLE, Boolean.TRUE.toString());
+        }
         params.put("password", password);
         params.put("zone", Long.toString(dcId));
         params.put("guid", record.uuid);
@@ -16,6 +16,7 @@
 // under the License.
 package com.cloud.hypervisor.xenserver.resource;

+import static com.cloud.hypervisor.xenserver.discoverer.XcpServerDiscoverer.isUefiSupported;
 import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

 import java.io.BufferedReader;
@@ -51,6 +52,7 @@ import javax.naming.ConfigurationException;
 import javax.xml.parsers.DocumentBuilderFactory;
 import javax.xml.parsers.ParserConfigurationException;

+import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageAnswer;
 import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageCommand;
 import org.apache.cloudstack.diagnostics.DiagnosticsService;
@@ -1438,6 +1440,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         } catch (final Exception e) {
             throw new CloudRuntimeException("Unable to finalize VM MetaData: " + vmSpec);
         }
+        try {
+            setVmBootDetails(vm, conn, vmSpec.getBootType(), vmSpec.getBootMode());
+        } catch (final XenAPIException | XmlRpcException e) {
+            throw new CloudRuntimeException(String.format("Unable to handle VM boot options: %s", vmSpec), e);
+        }
         return vm;
     }
@@ -1784,6 +1791,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
         details.put("product_brand", productBrand);
         details.put("product_version", _host.getProductVersion());
+        if (isUefiSupported(_host.getProductVersion())) {
+            details.put(com.cloud.host.Host.HOST_UEFI_ENABLE, Boolean.TRUE.toString());
+        }
         if (hr.softwareVersion.get("product_version_text_short") != null) {
             details.put("product_version_text_short", hr.softwareVersion.get("product_version_text_short"));
             cmd.setHypervisorVersion(hr.softwareVersion.get("product_version_text_short"));
@@ -1942,6 +1952,20 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
     }

+    protected void setVmBootDetails(final VM vm, final Connection conn, String bootType, String bootMode) throws XenAPIException, XmlRpcException {
+        if (!ApiConstants.BootType.UEFI.toString().equals(bootType)) {
+            bootType = ApiConstants.BootType.BIOS.toString();
+        }
+        Boolean isSecure = bootType.equals(ApiConstants.BootType.UEFI.toString()) &&
+                ApiConstants.BootMode.SECURE.toString().equals(bootMode);
+        final Map<String, String> bootParams = vm.getHVMBootParams(conn);
+        bootParams.replace("firmware", bootType.toLowerCase());
+        vm.setHVMBootParams(conn, bootParams);
+        final Map<String, String> platform = vm.getPlatform(conn);
+        platform.put("secureboot", isSecure.toString());
+        vm.setPlatform(conn, platform);
+    }
+
     /**
      * This method just creates a XenServer network following the tunnel network
      * naming convention
@@ -17,6 +17,7 @@

 package com.cloud.hypervisor.xenserver.discoverer;

+import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.InOrder;
@@ -70,4 +71,25 @@ public class XcpServerDiscovererTest {
         inOrder.verify(vmTemplateVOMock).setDisplayText("XenServer Tools Installer ISO (xen-pv-drv-iso)");
         inOrder.verify(vmTemplateDao).update(1L, vmTemplateVOMock);
     }
+
+    @Test
+    public void uefiSupportedVersionTest() {
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("8.2"));
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("8.2.0"));
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("8.2.1"));
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("9"));
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("9.1"));
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("9.1.0"));
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("10"));
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("10.1"));
+        Assert.assertTrue(XcpServerDiscoverer.isUefiSupported("10.1.0"));
+        Assert.assertFalse(XcpServerDiscoverer.isUefiSupported(null));
+        Assert.assertFalse(XcpServerDiscoverer.isUefiSupported(""));
+        Assert.assertFalse(XcpServerDiscoverer.isUefiSupported("abc"));
+        Assert.assertFalse(XcpServerDiscoverer.isUefiSupported("0"));
+        Assert.assertFalse(XcpServerDiscoverer.isUefiSupported("7.4"));
+        Assert.assertFalse(XcpServerDiscoverer.isUefiSupported("8"));
+        Assert.assertFalse(XcpServerDiscoverer.isUefiSupported("8.1"));
+        Assert.assertFalse(XcpServerDiscoverer.isUefiSupported("8.1.0"));
+    }
 }
@@ -139,7 +139,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
         EndPoint ep = epSelector.select(volume);
         Answer answer = null;
         if (ep == null) {
-            String errMsg = "No remote endpoint to send DeleteCommand, check if host or ssvm is down?";
+            String errMsg = "No remote endpoint to send CreateObjectCommand, check if host or ssvm is down?";
             s_logger.error(errMsg);
             answer = new Answer(cmd, false, errMsg);
         } else {
@@ -281,7 +281,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
         EndPoint ep = epSelector.select(srcData, destData);
         Answer answer = null;
         if (ep == null) {
-            String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
+            String errMsg = "No remote endpoint to send CopyCommand, check if host or ssvm is down?";
             s_logger.error(errMsg);
             answer = new Answer(cmd, false, errMsg);
         } else {
@@ -156,7 +156,9 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa

     @Override
     public TemplateResponse newTemplateResponse(EnumSet<ApiConstants.DomainDetails> detailsView, ResponseView view, TemplateJoinVO template) {
-        List<TemplateDataStoreVO> templatesInStore = _templateStoreDao.listByTemplateNotBypassed(template.getId());
+        List<ImageStoreVO> storesInZone = dataStoreDao.listStoresByZoneId(template.getDataCenterId());
+        Long[] storeIds = storesInZone.stream().map(ImageStoreVO::getId).toArray(Long[]::new);
+        List<TemplateDataStoreVO> templatesInStore = _templateStoreDao.listByTemplateNotBypassed(template.getId(), storeIds);
         List<Map<String, String>> downloadProgressDetails = new ArrayList();
         HashMap<String, String> downloadDetailInImageStores = null;
         for (TemplateDataStoreVO templateInStore : templatesInStore) {
@@ -1463,12 +1463,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
      * If the volume is not in the primary storage, we do nothing here.
      */
     protected void expungeVolumesInPrimaryStorageIfNeeded(VolumeVO volume) throws InterruptedException, ExecutionException {
-        VolumeInfo volOnPrimary = volFactory.getVolume(volume.getId(), DataStoreRole.Primary);
-        if (volOnPrimary != null) {
-            s_logger.info("Expunging volume " + volume.getId() + " from primary data store");
-            AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volOnPrimary);
-            future.get();
-        }
+        expungeVolumesInPrimaryOrSecondary(volume, DataStoreRole.Primary);
     }

     /**
@@ -1476,16 +1471,29 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
      * If it is, we will execute an asynchronous call to delete it there. Then, we decrement the {@link ResourceType#secondary_storage} for the account that owns the volume.
      */
     protected void expungeVolumesInSecondaryStorageIfNeeded(VolumeVO volume) throws InterruptedException, ExecutionException {
-        VolumeInfo volOnSecondary = volFactory.getVolume(volume.getId(), DataStoreRole.Image);
-        if (volOnSecondary != null) {
-            s_logger.info("Expunging volume " + volume.getId() + " from secondary data store");
-            AsyncCallFuture<VolumeApiResult> future2 = volService.expungeVolumeAsync(volOnSecondary);
-            future2.get();
-
-            _resourceLimitMgr.decrementResourceCount(volOnSecondary.getAccountId(), ResourceType.secondary_storage, volOnSecondary.getSize());
-        }
+        expungeVolumesInPrimaryOrSecondary(volume, DataStoreRole.Image);
     }

+    private void expungeVolumesInPrimaryOrSecondary(VolumeVO volume, DataStoreRole role) throws InterruptedException, ExecutionException {
+        VolumeInfo volOnStorage = volFactory.getVolume(volume.getId(), role);
+        if (volOnStorage != null) {
+            s_logger.info("Expunging volume " + volume.getId() + " from " + role + " data store");
+            AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volOnStorage);
+            VolumeApiResult result = future.get();
+            if (result.isFailed()) {
+                String msg = "Failed to expunge the volume " + volume + " in " + role + " data store";
+                s_logger.warn(msg);
+                String details = "";
+                if (result.getResult() != null && !result.getResult().isEmpty()) {
+                    details = msg + " : " + result.getResult();
+                }
+                throw new CloudRuntimeException(details);
+            }
+            if (DataStoreRole.Image.equals(role)) {
+                _resourceLimitMgr.decrementResourceCount(volOnStorage.getAccountId(), ResourceType.secondary_storage, volOnStorage.getSize());
+            }
+        }
+    }
     /**
      * Clean volumes cache entries (if they exist).
      */
@@ -742,7 +742,7 @@ class CsRemoteAccessVpn(CsDataBag):

         secret = CsFile(vpnsecretfilte)
         secret.empty()
-        secret.addeq("%s %%any : PSK \"%s\"" % (left, psk))
+        secret.addeq("%s : PSK \"%s\"" % (left, psk))
         secret.commit()

         xl2tpdconf = CsFile(xl2tpdconffile)
@@ -1008,7 +1008,7 @@ class CsForwardingRules(CsDataBag):

         # Configure the hairpin snat
         self.fw.append(["nat", "front", "-A POSTROUTING -s %s -d %s -j SNAT -o %s --to-source %s" %
-                        (self.getNetworkByIp(rule['internal_ip']), rule["internal_ip"], self.getDeviceByIp(rule["internal_ip"]), self.getGuestIp())])
+                        (self.getNetworkByIp(rule['internal_ip']), rule["internal_ip"], self.getDeviceByIp(rule["internal_ip"]), self.getGuestIpByIp(rule["internal_ip"]))])


 class IpTablesExecutor:
@@ -26,7 +26,7 @@
       @click="$message.success(`${$t('label.copied.clipboard')} : ${name}`)"
       v-clipboard:copy="name" >
       <slot name="avatar">
-        <os-logo v-if="resource.ostypeid || resource.ostypename" :osId="resource.ostypeid" :osName="resource.ostypename" size="4x" @update-osname="(name) => this.resource.ostypename = name"/>
+        <os-logo v-if="resource.ostypeid || resource.ostypename" :osId="resource.ostypeid" :osName="resource.ostypename" size="4x" @update-osname="(name) => resource.ostypename = name"/>
         <a-icon v-else-if="typeof $route.meta.icon ==='string'" style="font-size: 36px" :type="$route.meta.icon" />
         <a-icon v-else style="font-size: 36px" :component="$route.meta.icon" />
       </slot>
@@ -477,10 +477,9 @@
       </span>
       <div style="margin-top: 15px" v-show="this.showDetails">
         <div
-          v-if="vm.templateid && ['KVM', 'VMware'].includes(hypervisor) && !template.deployasis">
+          v-if="vm.templateid && ['KVM', 'VMware', 'XenServer'].includes(hypervisor) && !template.deployasis">
           <a-form-item :label="$t('label.boottype')">
             <a-select
-              :autoFocus="vm.templateid && ['KVM', 'VMware'].includes(hypervisor) && !template.deployasis"
               v-decorator="['boottype']"
               @change="fetchBootModes"
             >
@@ -1652,6 +1651,7 @@ export default {
             duration: 0
           })
         }
+        eventBus.$emit('vm-refresh-data')
       },
       loadingMessage: `${title} ${this.$t('label.in.progress')}`,
       catchMessage: this.$t('error.fetching.async.job.result'),
@@ -32,16 +32,18 @@
       @handle-search-filter="($event) => fetchData($event)" />

     <compute-selection
-      v-if="selectedOffering && selectedOffering.iscustomized"
-      :cpuNumberInputDecorator="cpuNumberKey"
-      :cpuSpeedInputDecorator="cpuSpeedKey"
-      :memoryInputDecorator="memoryKey"
+      v-if="selectedOffering && (selectedOffering.iscustomized || selectedOffering.iscustomizediops)"
+      :cpunumber-input-decorator="cpuNumberKey"
+      :cpuspeed-input-decorator="cpuSpeedKey"
+      :memory-input-decorator="memoryKey"
       :computeOfferingId="selectedOffering.id"
       :isConstrained="'serviceofferingdetails' in selectedOffering"
       :minCpu="getMinCpu()"
       :maxCpu="'serviceofferingdetails' in selectedOffering ? selectedOffering.serviceofferingdetails.maxcpunumber*1 : Number.MAX_SAFE_INTEGER"
       :minMemory="getMinMemory()"
       :maxMemory="'serviceofferingdetails' in selectedOffering ? selectedOffering.serviceofferingdetails.maxmemory*1 : Number.MAX_SAFE_INTEGER"
+      :isCustomized="selectedOffering.iscustomized"
+      :isCustomizedIOps="'iscustomizediops' in selectedOffering && selectedOffering.iscustomizediops"
       @update-compute-cpunumber="updateFieldValue"
       @update-compute-cpuspeed="updateFieldValue"
       @update-compute-memory="updateFieldValue" />
@@ -120,14 +122,14 @@ export default {
       if (this.resource.state === 'Running') {
         return this.resource.cpunumber
       }
-      return 'serviceofferingdetails' in this.selectedOffering ? this.selectedOffering.serviceofferingdetails.mincpunumber * 1 : 1
+      return this.selectedOffering?.serviceofferingdetails?.mincpunumber * 1 || 1
     },
     getMinMemory () {
       // We can only scale up while a VM is running
       if (this.resource.state === 'Running') {
         return this.resource.memory
       }
-      return 'serviceofferingdetails' in this.selectedOffering ? this.selectedOffering.serviceofferingdetails.minmemory * 1 : 32
+      return this.selectedOffering?.serviceofferingdetails?.minmemory * 1 || 32
     },
     getMessage () {
       if (this.resource.hypervisor === 'VMware') {
@@ -25,7 +25,7 @@
       :validate-status="errors.cpu.status"
       :help="errors.cpu.message">
       <a-row :gutter="12">
-        <a-col :md="10" :lg="10" v-show="isConstrained">
+        <a-col :md="10" :lg="10" v-show="isConstrained && maxCpu && !isNaN(maxCpu)">
           <a-slider
             :min="minCpu"
             :max="maxCpu"
@@ -61,7 +61,7 @@
       :validate-status="errors.memory.status"
       :help="errors.memory.message">
       <a-row :gutter="12">
-        <a-col :md="10" :lg="10" v-show="isConstrained">
+        <a-col :md="10" :lg="10" v-show="isConstrained && maxMemory && !isNaN(maxMemory)">
           <a-slider
             :min="minMemory"
             :max="maxMemory"
@@ -175,7 +175,11 @@ export default {
   },
   computed: {
     colContraned () {
-      return this.isConstrained ? 12 : 8
+      if (this.isConstrained && this.maxCpu && !isNaN(this.maxCpu)) {
+        return 12
+      }
+
+      return 8
     }
   },
   watch: {
@@ -2115,8 +2115,7 @@ public class HypervisorHostHelper {
                 throw e;
             }
         } finally {
-            workerVmMo.detachAllDisks();
-            workerVmMo.destroy();
+            workerVmMo.detachAllDisksAndDestroy();
         }
     }
@@ -40,6 +40,8 @@ import com.cloud.storage.Storage;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.vmware.vim25.InvalidStateFaultMsg;
 import com.vmware.vim25.RuntimeFaultFaultMsg;
+import com.vmware.vim25.TaskInfo;
+import com.vmware.vim25.TaskInfoState;
 import com.vmware.vim25.VirtualMachineTicket;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
@@ -1506,6 +1508,11 @@ public class VirtualMachineMO extends BaseMO {
         return chain;
     }

+    public void detachAllDisksAndDestroy() throws Exception {
+        detachAllDisks();
+        destroy();
+    }
+
     public void detachAllDisks() throws Exception {
         if (s_logger.isTraceEnabled())
             s_logger.trace("vCenter API trace - detachAllDisk(). target MOR: " + _mor.getValue());
@@ -2056,8 +2063,7 @@ public class VirtualMachineMO extends BaseMO {
             return clonedVmMo;
         } finally {
             if (!bSuccess) {
-                clonedVmMo.detachAllDisks();
-                clonedVmMo.destroy();
+                clonedVmMo.detachAllDisksAndDestroy();
             }
         }
     }
@@ -3608,6 +3614,30 @@ public class VirtualMachineMO extends BaseMO {
         return ticket.getTicket();
     }

+    public void cancelPendingTasks() throws Exception {
+        String vmName = getVmName();
+        s_logger.debug("Checking for pending tasks of the VM: " + vmName);
+
+        ManagedObjectReference taskmgr = _context.getServiceContent().getTaskManager();
+        List<ManagedObjectReference> tasks = _context.getVimClient().getDynamicProperty(taskmgr, "recentTask");
+
+        int vmTasks = 0, vmPendingTasks = 0;
+        for (ManagedObjectReference task : tasks) {
+            TaskInfo info = (TaskInfo) (_context.getVimClient().getDynamicProperty(task, "info"));
+            if (info.getEntityName().equals(vmName)) {
+                vmTasks++;
+                if (!(info.getState().equals(TaskInfoState.SUCCESS) || info.getState().equals(TaskInfoState.ERROR))) {
+                    String taskName = StringUtils.isNotBlank(info.getName()) ? info.getName() : "Unknown";
+                    s_logger.debug(taskName + " task pending for the VM: " + vmName + ", cancelling it");
+                    vmPendingTasks++;
+                    _context.getVimClient().cancelTask(task);
+                }
+            }
+        }
+
+        s_logger.debug(vmPendingTasks + " pending tasks for the VM: " + vmName + " found, out of " + vmTasks + " recent VM tasks");
+    }
+
     public void tagAsWorkerVM() throws Exception {
         setCustomFieldValue(CustomFieldConstants.CLOUD_WORKER, "true");
         String workerTag = String.format("%d-%s", System.currentTimeMillis(), getContext().getStockObject("noderuninfo"));
@@ -42,6 +42,8 @@ import org.apache.cloudstack.utils.security.SecureSSLSocketFactory;
 import com.vmware.pbm.PbmPortType;
 import com.vmware.pbm.PbmService;
 import com.vmware.pbm.PbmServiceInstanceContent;
+
+import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 import org.w3c.dom.Element;
@@ -779,4 +781,54 @@ public class VmwareClient {
         return vCenterSessionTimeout;
     }

+    public void cancelTask(ManagedObjectReference task) throws Exception {
+        TaskInfo info = (TaskInfo)(getDynamicProperty(task, "info"));
+        if (info == null) {
+            s_logger.warn("Unable to get the task info, so couldn't cancel the task");
+            return;
+        }
+
+        String taskName = StringUtils.isNotBlank(info.getName()) ? info.getName() : "Unknown";
+        taskName += "(" + info.getKey() + ")";
+
+        String entityName = StringUtils.isNotBlank(info.getEntityName()) ? info.getEntityName() : "";
+
+        if (info.getState().equals(TaskInfoState.SUCCESS)) {
+            s_logger.debug(taskName + " task successfully completed for the entity " + entityName + ", can't cancel it");
+            return;
+        }
+
+        if (info.getState().equals(TaskInfoState.ERROR)) {
+            s_logger.debug(taskName + " task execution failed for the entity " + entityName + ", can't cancel it");
+            return;
+        }
+
+        s_logger.debug(taskName + " task pending for the entity " + entityName + ", trying to cancel");
+        if (!info.isCancelable()) {
+            s_logger.warn(taskName + " task will continue to run on vCenter because it can't be cancelled");
+            return;
+        }
+
+        s_logger.debug("Cancelling task " + taskName + " of the entity " + entityName);
+        getService().cancelTask(task);
+
+        // Since task cancellation is asynchronous, wait for the task to be cancelled
+        Object[] result = waitForValues(task, new String[] {"info.state", "info.error"}, new String[] {"state"},
+                new Object[][] {new Object[] {TaskInfoState.SUCCESS, TaskInfoState.ERROR}});
+
+        if (result != null && result.length == 2) { //result for 2 properties: info.state, info.error
+            if (result[0].equals(TaskInfoState.SUCCESS)) {
+                s_logger.warn("Failed to cancel" + taskName + " task of the entity " + entityName + ", the task successfully completed");
+            }
+
+            if (result[1] instanceof LocalizedMethodFault) {
+                MethodFault fault = ((LocalizedMethodFault)result[1]).getFault();
+                if (fault instanceof RequestCanceled) {
+                    s_logger.debug(taskName + " task of the entity " + entityName + " was successfully cancelled");
+                }
+            } else {
+                s_logger.warn("Couldn't cancel " + taskName + " task of the entity " + entityName + " due to " + ((LocalizedMethodFault)result[1]).getLocalizedMessage());
+            }
+        }
+    }
 }