diff --git a/.asf.yaml b/.asf.yaml index ce89a03d9ce..1772623f1b1 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -41,6 +41,7 @@ github: features: wiki: true issues: true + discussions: true projects: true enabled_merge_buttons: @@ -49,15 +50,21 @@ github: rebase: false collaborators: + - acs-robot - kiranchavala - rajujith - alexandremattioli - vishesh92 - GaOrtiga - - acs-robot - BryanMLima - SadiJr - JoaoJandre - winterhazel protected_branches: ~ + +notifications: + commits: commits@cloudstack.apache.org + issues: commits@cloudstack.apache.org + pullrequests: commits@cloudstack.apache.org + discussions: users@cloudstack.apache.org diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index c682314097a..0e979d370f4 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -14,6 +14,8 @@ */ package com.cloud.agent.properties; +import org.apache.cloudstack.utils.security.KeyStoreUtils; + /** * Class of constant agent's properties available to configure on * "agent.properties". @@ -779,6 +781,13 @@ public class AgentProperties{ */ public static final Property KVM_HEARTBEAT_CHECKER_TIMEOUT = new Property<>("kvm.heartbeat.checker.timeout", 360000L); + /** + * Keystore passphrase + * Data type: String.
+ * Default value: null + */ + public static final Property KEYSTORE_PASSPHRASE = new Property<>(KeyStoreUtils.KS_PASSPHRASE_PROPERTY, null, String.class); + public static class Property { private String name; private T defaultValue; diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 5fce169ffed..67fed5500ee 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -320,6 +320,7 @@ public class EventTypes { public static final String EVENT_DOMAIN_CREATE = "DOMAIN.CREATE"; public static final String EVENT_DOMAIN_DELETE = "DOMAIN.DELETE"; public static final String EVENT_DOMAIN_UPDATE = "DOMAIN.UPDATE"; + public static final String EVENT_DOMAIN_MOVE = "DOMAIN.MOVE"; // Snapshots public static final String EVENT_SNAPSHOT_COPY = "SNAPSHOT.COPY"; @@ -878,6 +879,7 @@ public class EventTypes { entityEventDetails.put(EVENT_DOMAIN_CREATE, Domain.class); entityEventDetails.put(EVENT_DOMAIN_DELETE, Domain.class); entityEventDetails.put(EVENT_DOMAIN_UPDATE, Domain.class); + entityEventDetails.put(EVENT_DOMAIN_MOVE, Domain.class); // Snapshots entityEventDetails.put(EVENT_SNAPSHOT_CREATE, Snapshot.class); diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index 1ee7200a313..8a2ec1a8905 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -77,13 +77,18 @@ public class Storage { } public static enum Capability { - HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"); + HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"), + ALLOW_MIGRATE_OTHER_POOLS("ALLOW_MIGRATE_OTHER_POOLS"); private final String capability; private Capability(String capability) { this.capability = capability; } + + public String toString() { + return this.capability; + } } public static enum ProvisioningType { @@ -150,7 +155,8 @@ public class Storage { 
ManagedNFS(true, false, false), Linstor(true, true, false), DatastoreCluster(true, true, false), // for VMware, to abstract pool of clusters - StorPool(true, true, true); + StorPool(true, true, true), + FiberChannel(true, true, false); // Fiber Channel Pool for KVM hypervisors is used to find the volume by WWN value (/dev/disk/by-id/wwn-) private final boolean shared; private final boolean overprovisioning; diff --git a/api/src/main/java/com/cloud/user/DomainService.java b/api/src/main/java/com/cloud/user/DomainService.java index 3ccfcbcea4c..06109cf5ff7 100644 --- a/api/src/main/java/com/cloud/user/DomainService.java +++ b/api/src/main/java/com/cloud/user/DomainService.java @@ -20,9 +20,11 @@ import java.util.List; import org.apache.cloudstack.api.command.admin.domain.ListDomainChildrenCmd; import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmd; +import org.apache.cloudstack.api.command.admin.domain.MoveDomainCmd; import com.cloud.domain.Domain; import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; import com.cloud.utils.Pair; public interface DomainService { @@ -66,4 +68,5 @@ public interface DomainService { */ Domain findDomainByIdOrPath(Long id, String domainPath); + Domain moveDomainAndChildrenToNewParentDomain(MoveDomainCmd cmd) throws ResourceAllocationException; } diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index d58b75b0dca..c32c099ed3a 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.vm; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -518,7 +519,8 @@ public interface UserVmService { UserVm importVM(final DataCenter zone, final Host host, final VirtualMachineTemplate template, final String instanceName, final String displayName, final Account owner, final String userData, final Account caller, final Boolean isDisplayVm, final String keyboard, final long accountId, final long userId, final ServiceOffering serviceOffering, final String sshPublicKey, - final String hostName, final HypervisorType hypervisorType, final Map customParameters, final VirtualMachine.PowerState powerState) throws InsufficientCapacityException; + final String hostName, final HypervisorType hypervisorType, final Map customParameters, + final VirtualMachine.PowerState powerState, final LinkedHashMap> networkNicMap) throws InsufficientCapacityException; /** * Unmanage a guest VM from CloudStack diff --git a/api/src/main/java/com/cloud/vm/VmDetailConstants.java b/api/src/main/java/com/cloud/vm/VmDetailConstants.java index 124d9d50d5b..9338cc11cd4 100644 --- a/api/src/main/java/com/cloud/vm/VmDetailConstants.java +++ b/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -39,6 +39,7 @@ public interface VmDetailConstants { // KVM specific (internal) String KVM_VNC_PORT = "kvm.vnc.port"; String KVM_VNC_ADDRESS = "kvm.vnc.address"; + String KVM_VNC_PASSWORD = "kvm.vnc.password"; // KVM specific, custom virtual GPU hardware String VIDEO_HARDWARE = "video.hardware"; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 00fb71ace00..99b4c9750ce 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -212,6 +212,7 @@ public class ApiConstants { public static final String HOST_IDS = "hostids"; public static final String HOST_IP = "hostip"; public static 
final String HOST_NAME = "hostname"; + public static final String HOST = "host"; public static final String HOST_CONTROL_STATE = "hostcontrolstate"; public static final String HOSTS_MAP = "hostsmap"; public static final String HYPERVISOR = "hypervisor"; @@ -1079,7 +1080,9 @@ public class ApiConstants { public static final String SOURCE_NAT_IP_ID = "sourcenatipaddressid"; public static final String HAS_RULES = "hasrules"; public static final String NSX_DETAIL_KEY = "forNsx"; + public static final String DISK_PATH = "diskpath"; public static final String IMPORT_SOURCE = "importsource"; + public static final String TEMP_PATH = "temppath"; public static final String OBJECT_STORAGE = "objectstore"; public static final String HEURISTIC_RULE = "heuristicrule"; public static final String HEURISTIC_TYPE_VALID_OPTIONS = "Valid options are: ISO, SNAPSHOT, TEMPLATE and VOLUME."; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/MoveDomainCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/MoveDomainCmd.java new file mode 100644 index 00000000000..586345b2de7 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/MoveDomainCmd.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.domain; + +import com.cloud.domain.Domain; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.user.Account; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DomainResponse; + +@APICommand(name = "moveDomain", description = "Moves a domain and its children to a new parent domain.", since = "4.19.0.0", responseObject = DomainResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) +public class MoveDomainCmd extends BaseCmd { + + private static final String APINAME = "moveDomain"; + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, required = true, entityType = DomainResponse.class, description = "The ID of the domain to be moved.") + private Long domainId; + + @Parameter(name = ApiConstants.PARENT_DOMAIN_ID, type = CommandType.UUID, required = true, entityType = DomainResponse.class, + description = "The ID of the new parent domain of the domain to be moved.") + private Long parentDomainId; + + public Long getDomainId() { + return domainId; + } + + public Long getParentDomainId() { + return parentDomainId; + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute() throws ResourceAllocationException { + Domain domain = _domainService.moveDomainAndChildrenToNewParentDomain(this); + + if (domain != null) { + DomainResponse 
response = _responseGenerator.createDomainResponse(domain); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to move the domain."); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java index 09ec5394921..7a907e0f76a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.api.command.admin.storage; import java.util.List; +import java.util.Map; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.log4j.Logger; @@ -32,6 +33,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import com.cloud.storage.StoragePool; import com.cloud.user.Account; +@SuppressWarnings("rawtypes") @APICommand(name = "updateStoragePool", description = "Updates a storage pool.", responseObject = StoragePoolResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateStoragePoolCmd extends BaseCmd { @@ -61,6 +63,20 @@ public class UpdateStoragePoolCmd extends BaseCmd { " enable it back.") private Boolean enabled; + @Parameter(name = ApiConstants.DETAILS, + type = CommandType.MAP, + required = false, + description = "the details for the storage pool", + since = "4.19.0") + private Map details; + + @Parameter(name = ApiConstants.URL, + type = CommandType.STRING, + required = false, + description = "the URL of the storage pool", + since = "4.19.0") + private String url; + @Parameter(name = ApiConstants.IS_TAG_A_RULE, type = CommandType.BOOLEAN, description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE) private Boolean isTagARule; @@ -115,6 
+131,22 @@ public class UpdateStoragePoolCmd extends BaseCmd { return ApiCommandResourceType.StoragePool; } + public Map getDetails() { + return details; + } + + public void setDetails(Map details) { + this.details = details; + } + + public String getUrl() { + return url; + } + + public void setUrl(String url) { + this.url = url; + } + @Override public void execute() { StoragePool result = _storageService.updateStoragePool(this); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java index 532a3f0d392..d632c786a16 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java @@ -84,7 +84,7 @@ public class ImportUnmanagedInstanceCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, - description = "the hypervisor name of the instance") + description = "the name of the instance as it is known to the hypervisor") private String name; @Parameter(name = ApiConstants.DISPLAY_NAME, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java index 01f517fb837..e8b9f3addde 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java @@ -31,13 +31,18 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import 
org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VmwareDatacenterResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.vm.VmImportService; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; +import javax.inject.Inject; + @APICommand(name = "importVm", description = "Import virtual machine from a unmanaged host into CloudStack", responseObject = UserVmResponse.class, @@ -47,21 +52,72 @@ import org.apache.log4j.Logger; authorized = {RoleType.Admin}, since = "4.19.0") public class ImportVmCmd extends ImportUnmanagedInstanceCmd { - public static final Logger LOGGER = Logger.getLogger(ImportVmCmd.class); + @Inject + public VmImportService vmImportService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + required = true, + description = "the zone ID") + private Long zoneId; + + @Parameter(name = ApiConstants.USERNAME, + type = CommandType.STRING, + description = "the username for the host") + private String username; + + @Parameter(name = ApiConstants.PASSWORD, + type = CommandType.STRING, + description = "the password for the host") + private String password; + + @Parameter(name = ApiConstants.HOST, + type = CommandType.STRING, + description = "the host name or IP address") + private String host; + @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, required = true, description = "hypervisor type of the host") private String hypervisor; + @Parameter(name = ApiConstants.DISK_PATH, + type = CommandType.STRING, + description = "path of the disk image") + private String diskPath; + @Parameter(name = ApiConstants.IMPORT_SOURCE, type = CommandType.STRING, required = true, 
description = "Source location for Import" ) private String importSource; + @Parameter(name = ApiConstants.NETWORK_ID, + type = CommandType.UUID, + entityType = NetworkResponse.class, + description = "the network ID") + private Long networkId; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "Host where local disk is located") + private Long hostId; + + @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "Shared storage pool where disk is located") + private Long storagePoolId; + + @Parameter(name = ApiConstants.TEMP_PATH, + type = CommandType.STRING, + description = "Temp Path on external host for disk image copy" ) + private String tmpPath; + // Import from Vmware to KVM migration parameters @Parameter(name = ApiConstants.EXISTING_VCENTER_ID, @@ -73,7 +129,7 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { @Parameter(name = ApiConstants.HOST_IP, type = BaseCmd.CommandType.STRING, description = "(only for importing migrated VMs from Vmware to KVM) VMware ESXi host IP/Name.") - private String host; + private String hostip; @Parameter(name = ApiConstants.VCENTER, type = CommandType.STRING, @@ -88,14 +144,6 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { description = "(only for importing migrated VMs from Vmware to KVM) Name of VMware cluster.") private String clusterName; - @Parameter(name = ApiConstants.USERNAME, type = CommandType.STRING, - description = "(only for importing migrated VMs from Vmware to KVM) The Username required to connect to resource.") - private String username; - - @Parameter(name = ApiConstants.PASSWORD, type = CommandType.STRING, - description = "(only for importing migrated VMs from Vmware to KVM) The password for the specified username.") - private String password; - @Parameter(name = ApiConstants.CONVERT_INSTANCE_HOST_ID, type = CommandType.UUID, entityType = 
HostResponse.class, description = "(only for importing migrated VMs from Vmware to KVM) optional - the host to perform the virt-v2v migration from VMware to KVM.") private Long convertInstanceHostId; @@ -104,30 +152,20 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { description = "(only for importing migrated VMs from Vmware to KVM) optional - the temporary storage pool to perform the virt-v2v migration from VMware to KVM.") private Long convertStoragePoolId; - @Override - public String getEventType() { - return EventTypes.EVENT_VM_IMPORT; - } + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// - @Override - public String getEventDescription() { - String vmName = getName(); - if (ObjectUtils.anyNotNull(vcenter, existingVcenterId)) { - String msg = StringUtils.isNotBlank(vcenter) ? - String.format("external vCenter: %s - datacenter: %s", vcenter, datacenterName) : - String.format("existing vCenter Datacenter with ID: %s", existingVcenterId); - return String.format("Importing unmanaged VM: %s from %s - VM: %s", getDisplayName(), msg, vmName); - } - return String.format("Importing unmanaged VM: %s", vmName); + public Long getZoneId() { + return zoneId; } - public Long getExistingVcenterId() { return existingVcenterId; } - public String getHost() { - return host; + public String getHostIp() { + return hostip; } public String getVcenter() { @@ -150,6 +188,10 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { return password; } + public String getHost() { + return host; + } + public Long getConvertInstanceHostId() { return convertInstanceHostId; } @@ -162,10 +204,47 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { return hypervisor; } + public String getDiskPath() { + return diskPath; + } + public String getImportSource() { return importSource; } + public Long getHostId() { + return hostId; + } + + public Long 
getStoragePoolId() { + return storagePoolId; + } + + public String getTmpPath() { + return tmpPath; + } + + public Long getNetworkId() { + return networkId; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_VM_IMPORT; + } + + @Override + public String getEventDescription() { + String vmName = getName(); + if (ObjectUtils.anyNotNull(vcenter, existingVcenterId)) { + String msg = StringUtils.isNotBlank(vcenter) ? + String.format("external vCenter: %s - datacenter: %s", vcenter, datacenterName) : + String.format("existing vCenter Datacenter with ID: %s", existingVcenterId); + return String.format("Importing unmanaged VM: %s from %s - VM: %s", getDisplayName(), msg, vmName); + } + return String.format("Importing unmanaged VM: %s", vmName); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -176,5 +255,4 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { response.setResponseName(getCommandName()); setResponseObject(response); } - } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java new file mode 100644 index 00000000000..88df04d9ef5 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.vm; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.UnmanagedInstanceResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.vm.UnmanagedInstanceTO; +import org.apache.cloudstack.vm.VmImportService; +import org.apache.log4j.Logger; + +import javax.inject.Inject; + +@APICommand(name = "listVmsForImport", + description = "Lists virtual machines on a unmanaged host", + responseObject = UnmanagedInstanceResponse.class, + responseView = ResponseObject.ResponseView.Full, + entityType = {UnmanagedInstanceTO.class}, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized = {RoleType.Admin}, + since = "4.19.0") +public class ListVmsForImportCmd extends BaseListCmd { + public static final Logger 
LOGGER = Logger.getLogger(ListVmsForImportCmd.class.getName()); + + @Inject + public VmImportService vmImportService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + required = true, + description = "the zone ID") + private Long zoneId; + + @Parameter(name = ApiConstants.USERNAME, + type = CommandType.STRING, + description = "the username for the host") + private String username; + + @Parameter(name = ApiConstants.PASSWORD, + type = CommandType.STRING, + description = "the password for the host") + private String password; + + @Parameter(name = ApiConstants.HOST, + type = CommandType.STRING, + required = true, + description = "the host name or IP address") + private String host; + + @Parameter(name = ApiConstants.HYPERVISOR, + type = CommandType.STRING, + required = true, + description = "hypervisor type of the host") + private String hypervisor; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getZoneId() { + return zoneId; + } + + public String getUsername() { + return username; + } + + public String getPassword() { + return password; + } + + public String getHost() { + return host; + } + + public String getHypervisor() { + return hypervisor; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + ListResponse response = vmImportService.listVmsForImport(this); + 
response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + Account account = CallContext.current().getCallingAccount(); + if (account != null) { + return account.getId(); + } + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java index 77aaa6bc1d3..723e0efec12 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java @@ -97,52 +97,44 @@ public class ListLoadBalancerRuleInstancesCmd extends BaseListCmd implements Use public void execute() { Pair, List> vmServiceMap = _lbService.listLoadBalancerInstances(this); List result = vmServiceMap.first(); + s_logger.debug(String.format("A total of [%s] user VMs were obtained when listing the load balancer instances: [%s].", result.size(), result)); + List serviceStates = vmServiceMap.second(); + s_logger.debug(String.format("A total of [%s] service states were obtained when listing the load balancer instances: [%s].", serviceStates.size(), serviceStates)); if (!isListLbVmip()) { - // list lb instances - ListResponse response = new ListResponse(); - List vmResponses = new ArrayList(); - if (result != null) { - vmResponses = _responseGenerator.createUserVmResponse(ResponseView.Restricted, "loadbalancerruleinstance", result.toArray(new UserVm[result.size()])); + ListResponse response = new ListResponse<>(); + List vmResponses = _responseGenerator.createUserVmResponse(ResponseView.Restricted, "loadbalancerruleinstance", result.toArray(new UserVm[0])); - - for (int i = 0; i < result.size(); i++) { - vmResponses.get(i).setServiceState(serviceStates.get(i)); - } + for (int i 
= 0; i < result.size(); i++) { + vmResponses.get(i).setServiceState(serviceStates.get(i)); } + response.setResponses(vmResponses); response.setResponseName(getCommandName()); setResponseObject(response); - - - } else { - ListResponse lbRes = new ListResponse(); - - List vmResponses = new ArrayList(); - List listlbVmRes = new ArrayList(); - - if (result != null) { - vmResponses = _responseGenerator.createUserVmResponse(getResponseView(), "loadbalancerruleinstance", result.toArray(new UserVm[result.size()])); - - - List ipaddr = null; - - for (int i=0;i lbRes = new ListResponse<>(); + + List vmResponses = _responseGenerator.createUserVmResponse(getResponseView(), "loadbalancerruleinstance", result.toArray(new UserVm[0])); + List lbRuleVmMapList = new ArrayList<>(); + + for (int i=0; i nics; + private String vncPassword; + public String getName() { return name; } @@ -167,6 +169,14 @@ public class UnmanagedInstanceTO { this.nics = nics; } + public String getVncPassword() { + return vncPassword; + } + + public void setVncPassword(String vncPassword) { + this.vncPassword = vncPassword; + } + public static class Disk { private String diskId; @@ -192,6 +202,8 @@ public class UnmanagedInstanceTO { private String datastorePath; + private int datastorePort; + private String datastoreType; public String getDiskId() { @@ -297,6 +309,14 @@ public class UnmanagedInstanceTO { public void setDatastoreType(String datastoreType) { this.datastoreType = datastoreType; } + + public void setDatastorePort(int datastorePort) { + this.datastorePort = datastorePort; + } + + public int getDatastorePort() { + return datastorePort; + } } public static class Nic { diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java index 2876a0127be..53aece94964 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java 
@@ -17,13 +17,20 @@ package org.apache.cloudstack.vm; +import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.component.PluggableService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import static com.cloud.hypervisor.Hypervisor.HypervisorType.KVM; +import static com.cloud.hypervisor.Hypervisor.HypervisorType.VMware; public interface UnmanagedVMsManager extends VmImportService, UnmanageVMService, PluggableService, Configurable { ConfigKey UnmanageVMPreserveNic = new ConfigKey<>("Advanced", Boolean.class, "unmanage.vm.preserve.nics", "false", "If set to true, do not remove VM nics (and its MAC addresses) when unmanaging a VM, leaving them allocated but not reserved. " + "If set to false, nics are removed and MAC addresses can be reassigned", true, ConfigKey.Scope.Zone); + + static boolean isSupported(Hypervisor.HypervisorType hypervisorType) { + return hypervisorType == VMware || hypervisorType == KVM; + } } diff --git a/api/src/main/java/org/apache/cloudstack/vm/VmImportService.java b/api/src/main/java/org/apache/cloudstack/vm/VmImportService.java index e5b121cd2d6..04ef248fb8a 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/VmImportService.java +++ b/api/src/main/java/org/apache/cloudstack/vm/VmImportService.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.vm; import org.apache.cloudstack.api.command.admin.vm.ImportUnmanagedInstanceCmd; import org.apache.cloudstack.api.command.admin.vm.ImportVmCmd; import org.apache.cloudstack.api.command.admin.vm.ListUnmanagedInstancesCmd; +import org.apache.cloudstack.api.command.admin.vm.ListVmsForImportCmd; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UnmanagedInstanceResponse; import org.apache.cloudstack.api.response.UserVmResponse; @@ -37,5 +38,8 @@ public interface VmImportService { ListResponse listUnmanagedInstances(ListUnmanagedInstancesCmd cmd); UserVmResponse 
importUnmanagedInstance(ImportUnmanagedInstanceCmd cmd); + UserVmResponse importVm(ImportVmCmd cmd); + + ListResponse listVmsForImport(ListVmsForImportCmd cmd); } diff --git a/client/pom.xml b/client/pom.xml index 048bddbe549..1706ed0c547 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -71,6 +71,56 @@ mysql mysql-connector-java + + org.apache.cloudstack + cloud-agent + ${project.version} + + + org.apache.cloudstack + cloud-api + ${project.version} + + + org.apache.cloudstack + cloud-core + ${project.version} + + + org.apache.cloudstack + cloud-framework-cluster + ${project.version} + + + org.apache.cloudstack + cloud-framework-config + ${project.version} + + + org.apache.cloudstack + cloud-framework-db + ${project.version} + + + org.apache.cloudstack + cloud-framework-events + ${project.version} + + + org.apache.cloudstack + cloud-framework-jobs + ${project.version} + + + org.apache.cloudstack + cloud-framework-managed-context + ${project.version} + + + org.apache.cloudstack + cloud-framework-security + ${project.version} + org.apache.cloudstack cloud-framework-spring-module @@ -81,6 +131,11 @@ cloud-framework-spring-lifecycle ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-adaptive + ${project.version} + org.apache.cloudstack cloud-plugin-storage-volume-solidfire @@ -111,6 +166,16 @@ cloud-plugin-storage-volume-storpool ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-primera + ${project.version} + + + org.apache.cloudstack + cloud-plugin-storage-volume-flasharray + ${project.version} + org.apache.cloudstack cloud-server @@ -597,6 +662,16 @@ cloud-plugin-storage-object-simulator ${project.version} + + org.apache.cloudstack + cloud-usage + ${project.version} + + + org.apache.cloudstack + cloud-utils + ${project.version} + @@ -906,6 +981,7 @@ mysql:mysql-connector-java org.apache.cloudstack:cloud-plugin-storage-volume-storpool org.apache.cloudstack:cloud-plugin-storage-volume-linstor + 
org.apache.cloudstack:cloud-usage com.linbit.linstor.api:java-linstor diff --git a/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java b/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java new file mode 100644 index 00000000000..dd136d8642f --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.agent.api; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class CheckVolumeAnswer extends Answer { + + private long size; + + CheckVolumeAnswer() { + } + + public CheckVolumeAnswer(CheckVolumeCommand cmd, String details, long size) { + super(cmd, true, details); + this.size = size; + } + + public long getSize() { + return size; + } + + public String getString() { + return "CheckVolumeAnswer [size=" + size + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java new file mode 100644 index 00000000000..b4036bebf3a --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java @@ -0,0 +1,59 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +import com.cloud.agent.api.to.StorageFilerTO; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class CheckVolumeCommand extends Command { + + String srcFile; + + StorageFilerTO storageFilerTO; + + + public String getSrcFile() { + return srcFile; + } + + public void setSrcFile(String srcFile) { + this.srcFile = srcFile; + } + + public CheckVolumeCommand() { + } + + @Override + public boolean executeInSequence() { + return false; + } + + public String getString() { + return "CheckVolumeCommand [srcFile=" + srcFile + "]"; + } + + public StorageFilerTO getStorageFilerTO() { + return storageFilerTO; + } + + public void setStorageFilerTO(StorageFilerTO storageFilerTO) { + this.storageFilerTO = storageFilerTO; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java new file mode 100644 index 00000000000..f6d7cab4596 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.agent.api; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class CopyRemoteVolumeAnswer extends Answer { + + private String remoteIp; + private String filename; + + private long size; + + CopyRemoteVolumeAnswer() { + } + + public CopyRemoteVolumeAnswer(CopyRemoteVolumeCommand cmd, String details, String filename, long size) { + super(cmd, true, details); + this.remoteIp = cmd.getRemoteIp(); + this.filename = filename; + this.size = size; + } + + public String getRemoteIp() { + return remoteIp; + } + + public void setRemoteIp(String remoteIp) { + this.remoteIp = remoteIp; + } + + public void setFilename(String filename) { + this.filename = filename; + } + + public String getFilename() { + return filename; + } + + public long getSize() { + return size; + } + + public String getString() { + return "CopyRemoteVolumeAnswer [remoteIp=" + remoteIp + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java new file mode 100644 index 00000000000..82bc4d7cb45 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java @@ -0,0 +1,101 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +import com.cloud.agent.api.to.StorageFilerTO; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class CopyRemoteVolumeCommand extends Command { + + String remoteIp; + String username; + String password; + String srcFile; + + String tmpPath; + + StorageFilerTO storageFilerTO; + + public CopyRemoteVolumeCommand(String remoteIp, String username, String password) { + this.remoteIp = remoteIp; + this.username = username; + this.password = password; + } + + public String getRemoteIp() { + return remoteIp; + } + + public void setRemoteIp(String remoteIp) { + this.remoteIp = remoteIp; + } + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public String getSrcFile() { + return srcFile; + } + + public void setSrcFile(String srcFile) { + this.srcFile = srcFile; + } + + public CopyRemoteVolumeCommand() { + } + + @Override + public boolean executeInSequence() { + return false; + } + + public String getString() { + return "CopyRemoteVolumeCommand [remoteIp=" + remoteIp + "]"; + } + + public void setTempPath(String tmpPath) { + this.tmpPath = tmpPath; + } + + public String getTmpPath() { + return tmpPath; + } + + public StorageFilerTO getStorageFilerTO() { + return storageFilerTO; + } + + public void setStorageFilerTO(StorageFilerTO storageFilerTO) { + this.storageFilerTO = storageFilerTO; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java new file mode 100644 index 00000000000..8cd072f1da1 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java @@ -0,0 +1,75 @@ +// Licensed to the Apache 
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.agent.api; + +import org.apache.cloudstack.vm.UnmanagedInstanceTO; + +import java.util.HashMap; +import java.util.List; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class GetRemoteVmsAnswer extends Answer { + + private String remoteIp; + private HashMap unmanagedInstances; + + List vmNames; + + GetRemoteVmsAnswer() { + } + + public GetRemoteVmsAnswer(GetRemoteVmsCommand cmd, String details, HashMap unmanagedInstances) { + super(cmd, true, details); + this.remoteIp = cmd.getRemoteIp(); + this.unmanagedInstances = unmanagedInstances; + } + + public GetRemoteVmsAnswer(GetRemoteVmsCommand cmd, String details, List vmNames) { + super(cmd, true, details); + this.remoteIp = cmd.getRemoteIp(); + this.vmNames = vmNames; + } + + public String getRemoteIp() { + return remoteIp; + } + + public void setRemoteIp(String remoteIp) { + this.remoteIp = remoteIp; + } + + public HashMap getUnmanagedInstances() { + return unmanagedInstances; + } + + public void setUnmanagedInstances(HashMap unmanagedInstances) { + this.unmanagedInstances = unmanagedInstances; + } + + public List getVmNames() { + return vmNames; + } + + public void setVmNames(List vmNames) { + this.vmNames = vmNames; + } + + 
public String getString() { + return "GetRemoteVmsAnswer [remoteIp=" + remoteIp + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java new file mode 100644 index 00000000000..5c71d12dbd0 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java @@ -0,0 +1,70 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class GetRemoteVmsCommand extends Command { + + String remoteIp; + String username; + String password; + + public GetRemoteVmsCommand(String remoteIp, String username, String password) { + this.remoteIp = remoteIp; + this.username = username; + this.password = password; + } + + public String getRemoteIp() { + return remoteIp; + } + + public void setRemoteIp(String remoteIp) { + this.remoteIp = remoteIp; + } + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public GetRemoteVmsCommand() { + } + + @Override + public boolean executeInSequence() { + return false; + } + + public String getString() { + return "GetRemoteVmsCommand [remoteIp=" + remoteIp + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java b/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java index 3c6118d426e..771d472be2a 100644 --- a/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java @@ -30,6 +30,10 @@ public class GetUnmanagedInstancesAnswer extends Answer { GetUnmanagedInstancesAnswer() { } + public GetUnmanagedInstancesAnswer(GetUnmanagedInstancesCommand cmd, String details) { + super(cmd, false, details); + } + public GetUnmanagedInstancesAnswer(GetUnmanagedInstancesCommand cmd, String details, HashMap unmanagedInstances) { super(cmd, true, details); this.instanceName = cmd.getInstanceName(); diff --git a/core/src/main/java/com/cloud/agent/api/MigrateCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateCommand.java index 27251f4bb78..3acdb9c351b 100644 --- a/core/src/main/java/com/cloud/agent/api/MigrateCommand.java +++ 
b/core/src/main/java/com/cloud/agent/api/MigrateCommand.java @@ -40,6 +40,9 @@ public class MigrateCommand extends Command { private boolean executeInSequence = false; private List migrateDiskInfoList = new ArrayList<>(); private Map dpdkInterfaceMapping = new HashMap<>(); + + private int newVmCpuShares; + Map vlanToPersistenceMap = new HashMap<>(); public Map getDpdkInterfaceMapping() { @@ -138,6 +141,14 @@ public class MigrateCommand extends Command { this.migrateDiskInfoList = migrateDiskInfoList; } + public int getNewVmCpuShares() { + return newVmCpuShares; + } + + public void setNewVmCpuShares(int newVmCpuShares) { + this.newVmCpuShares = newVmCpuShares; + } + public static class MigrateDiskInfo { public enum DiskType { FILE, BLOCK; diff --git a/core/src/main/java/com/cloud/agent/api/PrepareForMigrationAnswer.java b/core/src/main/java/com/cloud/agent/api/PrepareForMigrationAnswer.java index d0a544ba081..190e844ddc5 100644 --- a/core/src/main/java/com/cloud/agent/api/PrepareForMigrationAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/PrepareForMigrationAnswer.java @@ -28,6 +28,8 @@ public class PrepareForMigrationAnswer extends Answer { private Map dpdkInterfaceMapping = new HashMap<>(); + private Integer newVmCpuShares = null; + protected PrepareForMigrationAnswer() { } @@ -50,4 +52,12 @@ public class PrepareForMigrationAnswer extends Answer { public Map getDpdkInterfaceMapping() { return this.dpdkInterfaceMapping; } + + public Integer getNewVmCpuShares() { + return newVmCpuShares; + } + + public void setNewVmCpuShares(Integer newVmCpuShares) { + this.newVmCpuShares = newVmCpuShares; + } } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 15f5b231be2..01123401fac 100644 --- 
a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -168,6 +168,9 @@ public interface VolumeOrchestrationService { DiskProfile importVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner, Long deviceId, Long poolId, String path, String chainInfo); + DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachine vm, VirtualMachineTemplate template, + Long deviceId, Long poolId, String path, String chainInfo, DiskProfile diskProfile); + /** * Unmanage VM volumes */ diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 78b94912a0c..1b19ffee2b5 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -54,6 +54,7 @@ import com.cloud.network.vpc.VpcVO; import com.cloud.network.vpc.dao.VpcDao; import com.cloud.user.dao.AccountDao; import com.cloud.event.ActionEventUtils; +import com.google.gson.Gson; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; @@ -2854,23 +2855,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } boolean migrated = false; - Map dpdkInterfaceMapping = null; + Map dpdkInterfaceMapping = new HashMap<>(); try { - final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); - Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId()); - final 
MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType())); - if (MapUtils.isNotEmpty(vlanToPersistenceMap)) { - mc.setVlanToPersistenceMap(vlanToPersistenceMap); - } - - boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value(); - mc.setAutoConvergence(kvmAutoConvergence); - mc.setHostGuid(dest.getHost().getGuid()); - - dpdkInterfaceMapping = ((PrepareForMigrationAnswer) pfma).getDpdkInterfaceMapping(); - if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { - mc.setDpdkInterfaceMapping(dpdkInterfaceMapping); - } + final MigrateCommand mc = buildMigrateCommand(vm, to, dest, pfma, dpdkInterfaceMapping); try { final Answer ma = _agentMgr.send(vm.getLastHostId(), mc); @@ -2942,6 +2929,43 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } + /** + * Create and set parameters for the {@link MigrateCommand} used in the migration and scaling of VMs. + */ + protected MigrateCommand buildMigrateCommand(VMInstanceVO vmInstance, VirtualMachineTO virtualMachineTO, DeployDestination destination, Answer answer, + Map dpdkInterfaceMapping) { + final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vmInstance.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); + final MigrateCommand migrateCommand = new MigrateCommand(vmInstance.getInstanceName(), destination.getHost().getPrivateIpAddress(), isWindows, virtualMachineTO, + getExecuteInSequence(vmInstance.getHypervisorType())); + + Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vmInstance.getId()); + if (MapUtils.isNotEmpty(vlanToPersistenceMap)) { + s_logger.debug(String.format("Setting VLAN persistence to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), virtualMachineTO)); + migrateCommand.setVlanToPersistenceMap(vlanToPersistenceMap); + } + + 
migrateCommand.setAutoConvergence(StorageManager.KvmAutoConvergence.value()); + migrateCommand.setHostGuid(destination.getHost().getGuid()); + + PrepareForMigrationAnswer prepareForMigrationAnswer = (PrepareForMigrationAnswer) answer; + + Map answerDpdkInterfaceMapping = prepareForMigrationAnswer.getDpdkInterfaceMapping(); + if (MapUtils.isNotEmpty(answerDpdkInterfaceMapping) && dpdkInterfaceMapping != null) { + s_logger.debug(String.format("Setting DPDK interface mapping to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), + virtualMachineTO)); + dpdkInterfaceMapping.putAll(answerDpdkInterfaceMapping); + migrateCommand.setDpdkInterfaceMapping(dpdkInterfaceMapping); + } + + Integer newVmCpuShares = prepareForMigrationAnswer.getNewVmCpuShares(); + if (newVmCpuShares != null) { + s_logger.debug(String.format("Setting CPU shares to [%d] as part of migrate command for VM [%s].", newVmCpuShares, virtualMachineTO)); + migrateCommand.setNewVmCpuShares(newVmCpuShares); + } + + return migrateCommand; + } + private void updateVmPod(VMInstanceVO vm, long dstHostId) { // update the VMs pod HostVO host = _hostDao.findById(dstHostId); @@ -3021,6 +3045,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac *
    *
  • If the current storage pool of the volume is not a managed storage, we do not need to validate anything here. *
  • If the current storage pool is a managed storage and the target storage pool ID is different from the current one, we throw an exception. + *
  • If the current storage pool is a managed storage and explicitly declared its capable of migration to alternate storage pools *
*/ protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) { @@ -3030,6 +3055,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (currentPool.getId() == targetPool.getId()) { return; } + + Map details = _storagePoolDao.getDetails(currentPool.getId()); + if (details != null && Boolean.parseBoolean(details.get(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString()))) { + return; + } throw new CloudRuntimeException(String.format("Currently, a volume on managed storage can only be 'migrated' to itself " + "[volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].", volume.getUuid(), currentPool.getUuid(), targetPool.getUuid())); } @@ -4459,16 +4489,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac boolean migrated = false; try { - Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId()); - final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); - final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType())); - if (MapUtils.isNotEmpty(vlanToPersistenceMap)) { - mc.setVlanToPersistenceMap(vlanToPersistenceMap); - } - - boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value(); - mc.setAutoConvergence(kvmAutoConvergence); - mc.setHostGuid(dest.getHost().getGuid()); + final MigrateCommand mc = buildMigrateCommand(vm, to, dest, pfma, null); try { final Answer ma = _agentMgr.send(vm.getLastHostId(), mc); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 6f945479bd4..b8f3e5a10e5 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -2224,6 +2224,51 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati return toDiskProfile(vol, offering); } + @Override + public DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachine vm, VirtualMachineTemplate template, + Long deviceId, Long poolId, String path, String chainInfo, DiskProfile diskProfile) { + + VolumeVO vol = _volsDao.findById(diskProfile.getVolumeId()); + if (vm != null) { + vol.setInstanceId(vm.getId()); + } + + if (deviceId != null) { + vol.setDeviceId(deviceId); + } else if (type.equals(Type.ROOT)) { + vol.setDeviceId(0l); + } else { + vol.setDeviceId(1l); + } + + if (template != null) { + if (ImageFormat.ISO.equals(template.getFormat())) { + vol.setIsoId(template.getId()); + } else if (Storage.TemplateType.DATADISK.equals(template.getTemplateType())) { + vol.setTemplateId(template.getId()); + } + if (type == Type.ROOT) { + vol.setTemplateId(template.getId()); + } + } + + // display flag matters only for the User vms + if (VirtualMachine.Type.User.equals(vm.getType())) { + UserVmVO userVm = _userVmDao.findById(vm.getId()); + vol.setDisplayVolume(userVm.isDisplayVm()); + } + + vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType())); + vol.setPoolId(poolId); + vol.setPath(path); + vol.setChainInfo(chainInfo); + vol.setSize(diskProfile.getSize()); + vol.setState(Volume.State.Ready); + vol.setAttached(new Date()); + _volsDao.update(vol.getId(), vol); + return toDiskProfile(vol, offering); + } + @Override public void unmanageVolumes(long vmId) { if (s_logger.isDebugEnabled()) { diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDao.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDao.java index f4d0ad71b8c..43d4d25305e 
100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDao.java @@ -17,6 +17,7 @@ package com.cloud.network.dao; import java.util.List; +import java.util.Map; import com.cloud.utils.db.GenericDao; @@ -26,4 +27,6 @@ public interface NetworkDomainDao extends GenericDao { NetworkDomainVO getDomainNetworkMapByNetworkId(long networkId); List listNetworkIdsByDomain(long domainId); + + Map> listDomainsOfSharedNetworksUsedByDomainPath(String domainPath); } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDaoImpl.java index 188f306cecc..ce86a8636a1 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDaoImpl.java @@ -16,10 +16,17 @@ // under the License. package com.cloud.network.dao; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; - +import com.cloud.utils.db.TransactionLegacy; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -31,9 +38,23 @@ import com.cloud.utils.db.SearchCriteria.Op; @Component @DB() public class NetworkDomainDaoImpl extends GenericDaoBase implements NetworkDomainDao { + public static Logger logger = Logger.getLogger(NetworkDomainDaoImpl.class.getName()); final SearchBuilder AllFieldsSearch; final SearchBuilder DomainsSearch; + private static final String LIST_DOMAINS_OF_SHARED_NETWORKS_USED_BY_DOMAIN_PATH = "SELECT shared_nw.domain_id, \n" + + "GROUP_CONCAT('VM:', vm.uuid, ' | NW:' , network.uuid) \n" + + "FROM cloud.domain_network_ref AS shared_nw\n" + + "INNER JOIN cloud.nics AS nic ON (nic.network_id = 
shared_nw.network_id AND nic.removed IS NULL)\n" + + "INNER JOIN cloud.vm_instance AS vm ON (vm.id = nic.instance_id)\n" + + "INNER JOIN cloud.domain AS domain ON (domain.id = vm.domain_id)\n" + + "INNER JOIN cloud.domain AS domain_sn ON (domain_sn.id = shared_nw.domain_id)\n" + + "INNER JOIN cloud.networks AS network ON (shared_nw.network_id = network.id)\n" + + "WHERE shared_nw.subdomain_access = 1\n" + + "AND domain.path LIKE ?\n" + + "AND domain_sn.path NOT LIKE ?\n" + + "GROUP BY shared_nw.network_id"; + protected NetworkDomainDaoImpl() { super(); @@ -71,4 +92,37 @@ public class NetworkDomainDaoImpl extends GenericDaoBase } return networkIdsToReturn; } + + @Override + public Map> listDomainsOfSharedNetworksUsedByDomainPath(String domainPath) { + logger.debug(String.format("Retrieving the domains of the shared networks with subdomain access used by domain with path [%s].", domainPath)); + + TransactionLegacy txn = TransactionLegacy.currentTxn(); + try (PreparedStatement pstmt = txn.prepareStatement(LIST_DOMAINS_OF_SHARED_NETWORKS_USED_BY_DOMAIN_PATH)) { + Map> domainsOfSharedNetworksUsedByDomainPath = new HashMap<>(); + + String domainSearch = domainPath.concat("%"); + pstmt.setString(1, domainSearch); + pstmt.setString(2, domainSearch); + + try (ResultSet rs = pstmt.executeQuery()) { + while (rs.next()) { + Long domainId = rs.getLong(1); + List vmUuidsAndNetworkUuids = Arrays.asList(rs.getString(2).split(",")); + + domainsOfSharedNetworksUsedByDomainPath.put(domainId, vmUuidsAndNetworkUuids); + } + } + + return domainsOfSharedNetworksUsedByDomainPath; + } catch (SQLException e) { + logger.error(String.format("Failed to retrieve the domains of the shared networks with subdomain access used by domain with path [%s] due to [%s]. Returning an empty " + + "list of domains.", domainPath, e.getMessage())); + + logger.debug(String.format("Failed to retrieve the domains of the shared networks with subdomain access used by domain with path [%s]. 
Returning an empty " + + "list of domains.", domainPath), e); + + return new HashMap<>(); + } + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java index 79899b7119e..be6588e3189 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java @@ -152,5 +152,7 @@ public interface VolumeDao extends GenericDao, StateDao listByPoolIdAndPaths(long id, List pathList); + VolumeVO findByPoolIdAndPath(long id, String path); + List listByIds(List ids); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index 056b7206d72..bf556622463 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -71,6 +71,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol protected GenericSearchBuilder primaryStorageSearch; protected GenericSearchBuilder primaryStorageSearch2; protected GenericSearchBuilder secondaryStorageSearch; + private final SearchBuilder poolAndPathSearch; @Inject ResourceTagDao _tagsDao; @@ -487,6 +488,11 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol volumeIdSearch.and("idIN", volumeIdSearch.entity().getId(), Op.IN); volumeIdSearch.done(); + poolAndPathSearch = createSearchBuilder(); + poolAndPathSearch.and("poolId", poolAndPathSearch.entity().getPoolId(), Op.EQ); + poolAndPathSearch.and("path", poolAndPathSearch.entity().getPath(), Op.EQ); + poolAndPathSearch.done(); + } @Override @@ -802,6 +808,14 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol return listBy(sc); } + @Override + public VolumeVO findByPoolIdAndPath(long id, String path) { + SearchCriteria sc = poolAndPathSearch.create(); + sc.setParameters("poolId", id); + 
sc.setParameters("path", path); + return findOneBy(sc); + } + @Override public List listByIds(List ids) { if (CollectionUtils.isEmpty(ids)) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java index 0b38acb5c21..de161afea07 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java @@ -21,6 +21,7 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; public class DatabaseAccessObject { @@ -85,8 +86,8 @@ public class DatabaseAccessObject { return columnExists; } - public String generateIndexName(String tableName, String columnName) { - return String.format("i_%s__%s", tableName, columnName); + public String generateIndexName(String tableName, String... columnName) { + return String.format("i_%s__%s", tableName, StringUtils.join(columnName, "__")); } public boolean indexExists(Connection conn, String tableName, String indexName) { @@ -101,8 +102,8 @@ public class DatabaseAccessObject { return false; } - public void createIndex(Connection conn, String tableName, String columnName, String indexName) { - String stmt = String.format("CREATE INDEX %s on %s (%s)", indexName, tableName, columnName); + public void createIndex(Connection conn, String tableName, String indexName, String... 
columnNames) { + String stmt = String.format("CREATE INDEX %s ON %s (%s)", indexName, tableName, StringUtils.join(columnNames, ", ")); s_logger.debug("Statement: " + stmt); try (PreparedStatement pstmt = conn.prepareStatement(stmt)) { pstmt.execute(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java index 6b4e1814de0..51e6ac7b9a1 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java @@ -23,11 +23,11 @@ public class DbUpgradeUtils { private static DatabaseAccessObject dao = new DatabaseAccessObject(); - public static void addIndexIfNeeded(Connection conn, String tableName, String columnName) { - String indexName = dao.generateIndexName(tableName, columnName); + public static void addIndexIfNeeded(Connection conn, String tableName, String... columnNames) { + String indexName = dao.generateIndexName(tableName, columnNames); if (!dao.indexExists(conn, tableName, indexName)) { - dao.createIndex(conn, tableName, columnName, indexName); + dao.createIndex(conn, tableName, indexName, columnNames); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java index fd44e79e7cf..bdfe58cbf89 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java @@ -76,6 +76,7 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate public void performDataMigration(Connection conn) { decryptConfigurationValuesFromAccountAndDomainScopesNotInSecureHiddenCategories(conn); migrateBackupDates(conn); + addIndexes(conn); } @Override @@ -254,4 +255,11 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate } } + private 
void addIndexes(Connection conn) { + DbUpgradeUtils.addIndexIfNeeded(conn, "alert", "archived", "created"); + DbUpgradeUtils.addIndexIfNeeded(conn, "alert", "type", "data_center_id", "pod_id"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "event", "resource_type", "resource_id"); + } + } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDao.java b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDao.java index 07be976f202..27040c2f54e 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDao.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.affinity.dao; import java.util.List; +import java.util.Map; import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; @@ -28,4 +29,6 @@ public interface AffinityGroupDomainMapDao extends GenericDao listByDomain(Object... domainId); + Map> listDomainsOfAffinityGroupsUsedByDomainPath(String domainPath); + } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDaoImpl.java index 5ecf63d6a6c..1dd22df46d7 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDaoImpl.java @@ -16,23 +16,46 @@ // under the License. 
package org.apache.cloudstack.affinity.dao; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import javax.annotation.PostConstruct; import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; +import org.apache.log4j.Logger; +import com.cloud.network.dao.NetworkDomainDaoImpl; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.TransactionLegacy; public class AffinityGroupDomainMapDaoImpl extends GenericDaoBase implements AffinityGroupDomainMapDao { + public static Logger logger = Logger.getLogger(AffinityGroupDomainMapDaoImpl.class.getName()); private SearchBuilder ListByAffinityGroup; private SearchBuilder DomainsSearch; + private static final String LIST_DOMAINS_WITH_AFFINITY_GROUPS_WITH_SUBDOMAIN_ACCESS_USED_BY_DOMAIN_PATH = "SELECT affinity_group_domain_map.domain_id, \n" + + "GROUP_CONCAT('VM:', vm.uuid, ' | AG:' , affinity_group.uuid) \n" + + "FROM cloud.affinity_group_domain_map AS affinity_group_domain_map\n" + + "INNER JOIN cloud.affinity_group_vm_map AS affinity_group_vm_map ON (cloud.affinity_group_domain_map.affinity_group_id = affinity_group_vm_map.affinity_group_id)\n" + + "INNER JOIN cloud.vm_instance AS vm ON (vm.id = affinity_group_vm_map.instance_id)\n" + + "INNER JOIN cloud.domain AS domain ON (domain.id = vm.domain_id)\n" + + "INNER JOIN cloud.domain AS domain_sn ON (domain_sn.id = affinity_group_domain_map.domain_id)\n" + + "INNER JOIN cloud.affinity_group AS affinity_group ON (affinity_group.id = affinity_group_domain_map.affinity_group_id)\n" + + "WHERE affinity_group_domain_map.subdomain_access = 1\n" + + "AND domain.path LIKE ?\n" + + "AND domain_sn.path NOT LIKE ?\n" + + "GROUP BY affinity_group.id"; + public AffinityGroupDomainMapDaoImpl() { } @@ -62,4 +85,38 
@@ public class AffinityGroupDomainMapDaoImpl extends GenericDaoBase> listDomainsOfAffinityGroupsUsedByDomainPath(String domainPath) { + logger.debug(String.format("Retrieving the domains of the affinity groups with subdomain access used by domain with path [%s].", domainPath)); + + TransactionLegacy txn = TransactionLegacy.currentTxn(); + try (PreparedStatement pstmt = txn.prepareStatement(LIST_DOMAINS_WITH_AFFINITY_GROUPS_WITH_SUBDOMAIN_ACCESS_USED_BY_DOMAIN_PATH)) { + Map> domainsOfAffinityGroupsUsedByDomainPath = new HashMap<>(); + + String domainSearch = domainPath.concat("%"); + pstmt.setString(1, domainSearch); + pstmt.setString(2, domainSearch); + + + try (ResultSet rs = pstmt.executeQuery()) { + while (rs.next()) { + Long domainId = rs.getLong(1); + List vmUuidsAndAffinityGroupUuids = Arrays.asList(rs.getString(2).split(",")); + + domainsOfAffinityGroupsUsedByDomainPath.put(domainId, vmUuidsAndAffinityGroupUuids); + } + } + + return domainsOfAffinityGroupsUsedByDomainPath; + } catch (SQLException e) { + logger.error(String.format("Failed to retrieve the domains of the affinity groups with subdomain access used by domain with path [%s] due to [%s]. Returning an " + + "empty list of domains.", domainPath, e.getMessage())); + + logger.debug(String.format("Failed to retrieve the domains of the affinity groups with subdomain access used by domain with path [%s]. 
Returning an empty " + + "list of domains.", domainPath), e); + + return new HashMap<>(); + } + } + } diff --git a/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java b/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java index 8f1ee3e0450..bd05fbe3c4c 100644 --- a/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java +++ b/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java @@ -93,8 +93,8 @@ public class DatabaseAccessObjectTest { @Test public void generateIndexNameTest() { - String indexName = dao.generateIndexName("mytable","mycolumn"); - Assert.assertEquals( "i_mytable__mycolumn", indexName); + String indexName = dao.generateIndexName("mytable","mycolumn1", "mycolumn2"); + Assert.assertEquals( "i_mytable__mycolumn1__mycolumn2", indexName); } @Test @@ -136,10 +136,11 @@ public class DatabaseAccessObjectTest { Connection conn = connectionMock; String tableName = "mytable"; - String columnName = "mycolumn"; + String columnName1 = "mycolumn1"; + String columnName2 = "mycolumn2"; String indexName = "myindex"; - dao.createIndex(conn, tableName, columnName, indexName); + dao.createIndex(conn, tableName, indexName, columnName1, columnName2); verify(connectionMock, times(1)).prepareStatement(anyString()); verify(preparedStatementMock, times(1)).execute(); verify(preparedStatementMock, times(1)).close(); diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index e450addb261..370753ed923 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -193,7 +193,7 @@ public class AncientDataMotionStrategy implements 
DataMotionStrategy { destData.getType() == DataObjectType.TEMPLATE)) { // volume transfer from primary to secondary. Volume transfer between primary pools are already handled by copyVolumeBetweenPools // Delete cache in order to certainly transfer a latest image. - s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId + + if (s_logger.isDebugEnabled()) s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.deleteCacheObject(srcForCopy); } else { @@ -205,7 +205,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { ", uuid: " + cacheUuid + ")"); cacheMgr.deleteCacheObject(srcForCopy); } else { - s_logger.debug("Decrease reference count of " + cacheType + + if (s_logger.isDebugEnabled()) s_logger.debug("Decrease reference count of " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.releaseCacheObject(srcForCopy); } @@ -213,7 +213,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("copy object failed: ", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy object failed: ", e); if (cacheData != null) { cacheMgr.deleteCacheObject(cacheData); } @@ -331,7 +331,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("Failed to send to storage pool", e); + if (s_logger.isDebugEnabled()) s_logger.debug("Failed to send to storage pool", e); throw new CloudRuntimeException("Failed to send to storage pool", e); } } @@ -388,7 +388,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { if (answer == null || !answer.getResult()) { if (answer != null) { - s_logger.debug("copy to image store failed: " + answer.getDetails()); + if (s_logger.isDebugEnabled()) s_logger.debug("copy to image store failed: " + answer.getDetails()); } objOnImageStore.processEvent(Event.OperationFailed); 
imageStore.delete(objOnImageStore); @@ -411,7 +411,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { if (answer == null || !answer.getResult()) { if (answer != null) { - s_logger.debug("copy to primary store failed: " + answer.getDetails()); + if (s_logger.isDebugEnabled()) s_logger.debug("copy to primary store failed: " + answer.getDetails()); } objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); @@ -471,13 +471,17 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { s_logger.error(errMsg); answer = new Answer(command, false, errMsg); } else { + if (s_logger.isDebugEnabled()) s_logger.debug("Sending MIGRATE_COPY request to node " + ep); answer = ep.sendMessage(command); + if (s_logger.isDebugEnabled()) s_logger.debug("Received MIGRATE_COPY response from node with answer: " + answer); } if (answer == null || !answer.getResult()) { throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool); } else { // Update the volume details after migration. 
+ if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY updating volume"); + VolumeVO volumeVo = volDao.findById(volume.getId()); Long oldPoolId = volume.getPoolId(); volumeVo.setPath(((MigrateVolumeAnswer)answer).getVolumePath()); @@ -496,6 +500,8 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } volumeVo.setFolder(folder); volDao.update(volume.getId(), volumeVo); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY update volume data complete"); + } return answer; @@ -507,7 +513,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { Answer answer = null; String errMsg = null; try { - s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); + if (s_logger.isDebugEnabled()) s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) { answer = copyVolumeFromSnapshot(srcData, destData); } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) { @@ -516,11 +522,16 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { answer = cloneVolume(srcData, destData); } else if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary && destData.getDataStore().getRole() == DataStoreRole.Primary) { + if (s_logger.isDebugEnabled()) s_logger.debug("About to MIGRATE copy between datasources"); if (srcData.getId() == destData.getId()) { // The volume has to be migrated across storage pools. 
+ if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool STARTING"); answer = migrateVolumeToPool(srcData, destData); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool DONE: " + answer.getResult()); } else { + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools STARTING"); answer = copyVolumeBetweenPools(srcData, destData); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools DONE: " + answer.getResult()); } } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) { answer = copySnapshot(srcData, destData); @@ -532,7 +543,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { errMsg = answer.getDetails(); } } catch (Exception e) { - s_logger.debug("copy failed", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy failed", e); errMsg = e.toString(); } CopyCommandResult result = new CopyCommandResult(null, answer); @@ -627,7 +638,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("copy snasphot failed: ", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy snasphot failed: ", e); if (cacheData != null) { cacheMgr.deleteCacheObject(cacheData); } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 1419ae36d25..a93f624aa53 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; +import 
com.cloud.agent.api.PrepareForMigrationAnswer; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -106,6 +107,7 @@ import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; @@ -186,6 +188,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private EndPointSelector selector; @Inject VMTemplatePoolDao templatePoolDao; + @Inject + private VolumeDataFactory _volFactory; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { @@ -400,15 +404,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } else if (!isVolumeOnManagedStorage(destVolumeInfo)) { handleVolumeMigrationFromManagedStorageToNonManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } else { - String errMsg = "The source volume to migrate and the destination volume are both on managed storage. 
" + - "Migration in this case is not yet supported."; - - handleError(errMsg, callback); + handleVolumeMigrationFromManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } } else if (!isVolumeOnManagedStorage(destVolumeInfo)) { - String errMsg = "The 'StorageSystemDataMotionStrategy' does not support this migration use case."; - - handleError(errMsg, callback); + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { + String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString()); + handleError(errMsg, callback); + } else { + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); + } } else { handleVolumeMigrationFromNonManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } @@ -453,7 +457,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { String volumePath = null; try { - if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) { + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " + "from managed storage to non-managed storage."); } @@ -485,7 +489,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeCopyFromManagedStorageToSecondaryStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -512,12 +516,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } } + private void handleVolumeMigrationFromManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, + AsyncCompletionCallback callback) { + if 
(!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { + String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString()); + handleError(errMsg, callback); + } else { + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); + } + } + private void handleVolumeMigrationFromManagedStorageToNonManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { String errMsg = null; try { - if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) { + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " + "from managed storage to non-managed storage."); } @@ -525,10 +539,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { HypervisorType hypervisorType = HypervisorType.KVM; VirtualMachine vm = srcVolumeInfo.getAttachedVM(); - if (vm != null && vm.getState() != VirtualMachine.State.Stopped) { - throw new CloudRuntimeException("Currently, if a volume to migrate from managed storage to non-managed storage is attached to " + - "a VM, the VM must be in the Stopped state."); - } + checkAvailableForMigration(vm); long destStoragePoolId = destVolumeInfo.getPoolId(); StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(destStoragePoolId); @@ -553,7 +564,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromManagedStorageToNonManagedStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -579,9 +590,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private void 
verifyFormatWithPoolType(ImageFormat imageFormat, StoragePoolType poolType) { if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2 && - !(imageFormat == ImageFormat.RAW && StoragePoolType.PowerFlex == poolType)) { - throw new CloudRuntimeException("Only the following image types are currently supported: " + - ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", " + ImageFormat.QCOW2.toString() + ", and " + ImageFormat.RAW.toString() + "(for PowerFlex)"); + !(imageFormat == ImageFormat.RAW && (StoragePoolType.PowerFlex == poolType || + StoragePoolType.FiberChannel == poolType))) { + throw new CloudRuntimeException(String.format("Only the following image types are currently supported: %s, %s, %s, %s (for PowerFlex and FiberChannel)", + ImageFormat.VHD.toString(), ImageFormat.OVA.toString(), ImageFormat.QCOW2.toString(), ImageFormat.RAW.toString())); } } @@ -685,14 +697,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo); } else { - handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo); + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); } } catch (Exception ex) { errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -826,24 +838,73 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { _volumeDao.update(srcVolumeInfo.getId(), volumeVO); } - private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { + private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { VirtualMachine vm = srcVolumeInfo.getAttachedVM(); - if (vm != 
null && vm.getState() != VirtualMachine.State.Stopped) { - throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " + - "a VM, the VM must be in the Stopped state."); + checkAvailableForMigration(vm); + + String errMsg = null; + try { + destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); + VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + updatePathFromScsiName(volumeVO); + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + + // migrate the volume via the hypervisor + String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); + + updateVolumePath(destVolumeInfo.getId(), path); + volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + // only set this if it was not set. 
default to QCOW2 for KVM + if (volumeVO.getFormat() == null) { + volumeVO.setFormat(ImageFormat.QCOW2); + _volumeDao.update(volumeVO.getId(), volumeVO); + } + } catch (Exception ex) { + errMsg = "Primary storage migration failed due to an unexpected error: " + + ex.getMessage(); + if (ex instanceof CloudRuntimeException) { + throw ex; + } else { + throw new CloudRuntimeException(errMsg, ex); + } + } finally { + CopyCmdAnswer copyCmdAnswer; + if (errMsg != null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } + else { + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + DataTO dataTO = destVolumeInfo.getTO(); + copyCmdAnswer = new CopyCmdAnswer(dataTO); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(errMsg); + callback.complete(result); } + } - destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); + private void checkAvailableForMigration(VirtualMachine vm) { + if (vm != null && (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Migrating)) { + throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " + + "a VM, the VM must be in the Stopped or Migrating state."); + } + } - VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); - - volumeVO.setPath(volumeVO.get_iScsiName()); - - _volumeDao.update(volumeVO.getId(), volumeVO); - - destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + /** + * Only update the path from the iscsiName if the iscsiName is set. Otherwise take no action to avoid nullifying the path + * with a previously set path value. 
+ */ + private void updatePathFromScsiName(VolumeVO volumeVO) { + if (volumeVO.get_iScsiName() != null) { + volumeVO.setPath(volumeVO.get_iScsiName()); + _volumeDao.update(volumeVO.getId(), volumeVO); + } + } + private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { long srcStoragePoolId = srcVolumeInfo.getPoolId(); StoragePoolVO srcStoragePoolVO = _storagePoolDao.findById(srcStoragePoolId); @@ -856,14 +917,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); } - // migrate the volume via the hypervisor - migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); - - volumeVO = _volumeDao.findById(destVolumeInfo.getId()); - - volumeVO.setFormat(ImageFormat.QCOW2); - - _volumeDao.update(volumeVO.getId(), volumeVO); + return hostVO; } /** @@ -1075,7 +1129,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { catch (Exception ex) { errMsg = ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { if (usingBackendSnapshot) { @@ -1293,7 +1347,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { catch (Exception ex) { errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateManagedVolumeFromNonManagedSnapshot': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); @@ -1674,6 +1728,42 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { return copyCmdAnswer; } + /** + * Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the 
snapshot) + + * @param volumeVO + * @param snapshotInfo + */ + public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) { + VolumeVO volumeVO = null; + try { + volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", + snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); + volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + _volumeDao.persist(volumeVO); + VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + + if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { + snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); + // refresh volume info as data could have changed + tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + // save the "temp" volume info into the snapshot details (we need this to clean up at the end) + _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true); + _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true); + // NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO() + // whenever the TemporaryVolumeCopyPath is set. + } else { + throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); + } + } catch (Throwable e) { + // cleanup temporary volume + if (volumeVO != null) { + _volumeDao.remove(volumeVO.getId()); + } + throw e; + } + } + /** * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. 
@@ -1685,8 +1775,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. */ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); + if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { + prepTempVolumeForCopyFromSnapshot(snapshotInfo); + return; + } + + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); try { snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); } @@ -1701,6 +1796,24 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * invocation of createVolumeFromSnapshot(SnapshotInfo). */ private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { + VolumeVO volumeVO = null; + // cleanup any temporary volume previously created for copy from a snapshot + if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { + SnapshotDetailsVO tempUuid = null; + tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); + if (tempUuid == null || tempUuid.getValue() == null) { + return; + } + + volumeVO = _volumeDao.findByUuid(tempUuid.getValue()); + if (volumeVO != null) { + _volumeDao.remove(volumeVO.getId()); + } + _snapshotDetailsDao.remove(tempUuid.getId()); + _snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); + return; + } + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); try { @@ -1884,9 +1997,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } PrepareForMigrationCommand pfmc = new 
PrepareForMigrationCommand(vmTO); + Answer pfma; try { - Answer pfma = agentManager.send(destHost.getId(), pfmc); + pfma = agentManager.send(destHost.getId(), pfmc); if (pfma == null || !pfma.getResult()) { String details = pfma != null ? pfma.getDetails() : "null answer returned"; @@ -1894,8 +2008,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new AgentUnavailableException(msg, destHost.getId()); } - } - catch (final OperationTimedoutException e) { + } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Operation timed out", destHost.getId()); } @@ -1911,6 +2024,12 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { migrateCommand.setMigrateStorageManaged(managedStorageDestination); migrateCommand.setMigrateNonSharedInc(migrateNonSharedInc); + Integer newVmCpuShares = ((PrepareForMigrationAnswer) pfma).getNewVmCpuShares(); + if (newVmCpuShares != null) { + LOGGER.debug(String.format("Setting CPU shares to [%d] as part of migrate VM with volumes command for VM [%s].", newVmCpuShares, vmTO)); + migrateCommand.setNewVmCpuShares(newVmCpuShares); + } + boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value(); migrateCommand.setAutoConvergence(kvmAutoConvergence); @@ -2363,7 +2482,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { try { StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId()); - if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && !(ImageFormat.RAW.equals(volumeInfo.getFormat()) && StoragePoolType.PowerFlex == storagePoolVO.getPoolType())) { + if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && + !(ImageFormat.RAW.equals(volumeInfo.getFormat()) && ( + StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || + StoragePoolType.FiberChannel == storagePoolVO.getPoolType()))) { throw new CloudRuntimeException("When using managed storage, you can only create a template from a volume 
on KVM currently."); } @@ -2506,7 +2628,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { long snapshotId = snapshotInfo.getId(); - if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) { + // if the snapshot required a temporary volume be created check if the UUID is set so we can + // retrieve the temporary volume's path to use during remote copy + List storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath"); + if (storedDetails != null && storedDetails.size() > 0) { + String value = storedDetails.get(0).getValue(); + snapshotDetails.put(DiskTO.PATH, value); + } else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath()); } else { snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); @@ -2718,8 +2846,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } private String migrateVolumeForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, HostVO hostVO, String errMsg) { - boolean srcVolumeDetached = srcVolumeInfo.getAttachedVM() == null; - try { Map srcDetails = getVolumeDetails(srcVolumeInfo); Map destDetails = getVolumeDetails(destVolumeInfo); @@ -2727,16 +2853,11 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(), srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value()); - if (srcVolumeDetached) { - _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); - } - + _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - _volumeService.grantAccess(destVolumeInfo, hostVO, 
destVolumeInfo.getDataStore()); MigrateVolumeAnswer migrateVolumeAnswer = (MigrateVolumeAnswer)agentManager.send(hostVO.getId(), migrateVolumeCommand); - if (migrateVolumeAnswer == null || !migrateVolumeAnswer.getResult()) { if (migrateVolumeAnswer != null && StringUtils.isNotEmpty(migrateVolumeAnswer.getDetails())) { throw new CloudRuntimeException(migrateVolumeAnswer.getDetails()); @@ -2745,42 +2866,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new CloudRuntimeException(errMsg); } } - - if (srcVolumeDetached) { - _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); - } - - try { - _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); - } - catch (Exception e) { - // This volume should be deleted soon, so just log a warning here. - LOGGER.warn(e.getMessage(), e); - } - return migrateVolumeAnswer.getVolumePath(); - } - catch (Exception ex) { + } catch (CloudRuntimeException ex) { + throw ex; + } catch (Exception ex) { + throw new CloudRuntimeException("Unexpected error during volume migration: " + ex.getMessage(), ex); + } finally { try { - _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); - } - catch (Exception e) { - // This volume should be deleted soon, so just log a warning here. 
- LOGGER.warn(e.getMessage(), e); - } - - if (srcVolumeDetached) { _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); + } catch (Throwable e) { + LOGGER.warn("During cleanup post-migration and exception occured: " + e); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Exception during post-migration cleanup.", e); + } } - - String msg = "Failed to perform volume migration : "; - - LOGGER.warn(msg, ex); - - throw new CloudRuntimeException(msg + ex.getMessage(), ex); - } - finally { - handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index ffc12b98c84..c0ef227251c 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -882,9 +882,7 @@ public class VolumeServiceImpl implements VolumeService { */ private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, PrimaryDataStore destPrimaryDataStore) { // create a template volume on primary storage - AsyncCallFuture createTemplateFuture = new AsyncCallFuture<>(); TemplateInfo templateOnPrimary = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo, srcTemplateInfo.getDeployAsIsConfiguration()); - VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), srcTemplateInfo.getDeployAsIsConfiguration()); if (templatePoolRef == null) { @@ -897,7 +895,6 @@ public class 
VolumeServiceImpl implements VolumeService { // At this point, we have an entry in the DB that points to our cached template. // We need to lock it as there may be other VMs that may get started using the same template. // We want to avoid having to create multiple cache copies of the same template. - int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); long templatePoolRefId = templatePoolRef.getId(); @@ -909,28 +906,27 @@ public class VolumeServiceImpl implements VolumeService { try { // create a cache volume on the back-end - templateOnPrimary.processEvent(Event.CreateOnlyRequested); + CreateAsyncCompleteCallback callback = new CreateAsyncCompleteCallback(); - CreateVolumeContext createContext = new CreateVolumeContext<>(null, templateOnPrimary, createTemplateFuture); - AsyncCallbackDispatcher createCaller = AsyncCallbackDispatcher.create(this); - - createCaller.setCallback(createCaller.getTarget().createManagedTemplateImageCallback(null, null)).setContext(createContext); - - destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, createCaller); - - VolumeApiResult result = createTemplateFuture.get(); - - if (result.isFailed()) { - String errMesg = result.getResult(); - + destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, callback); + // validate we got a good result back + if (callback.result == null || callback.result.isFailed()) { + String errMesg; + if (callback.result == null) { + errMesg = "Unknown/unable to determine result"; + } else { + errMesg = callback.result.getResult(); + } + templateOnPrimary.processEvent(Event.OperationFailed); throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg); } + + templateOnPrimary.processEvent(Event.OperationSuccessed); + } catch (Throwable e) { s_logger.debug("Failed to create template 
volume on storage", e); - templateOnPrimary.processEvent(Event.OperationFailed); - throw new CloudRuntimeException(e.getMessage()); } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); @@ -939,6 +935,17 @@ public class VolumeServiceImpl implements VolumeService { return templateOnPrimary; } + private static class CreateAsyncCompleteCallback implements AsyncCompletionCallback { + + public CreateCmdResult result; + + @Override + public void complete(CreateCmdResult result) { + this.result = result; + } + + } + /** * This function copies a template from secondary storage to a template volume * created on managed storage. This template volume will be used as a cache. @@ -1464,6 +1471,16 @@ public class VolumeServiceImpl implements VolumeService { if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost); } + } catch (Exception e) { + if (templateOnPrimary != null) { + templateOnPrimary.processEvent(Event.OperationFailed); + } + VolumeApiResult result = new VolumeApiResult(volumeInfo); + result.setResult(e.getLocalizedMessage()); + result.setSuccess(false); + future.complete(result); + s_logger.warn("Failed to create template on primary storage", e); + return future; } finally { if (lock != null) { lock.unlock(); @@ -1478,8 +1495,8 @@ public class VolumeServiceImpl implements VolumeService { createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); } else { // We have a template on PowerFlex primary storage. Create new volume and copy to it. 
- s_logger.debug("Copying the template to the volume on primary storage"); - createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future); + createManagedVolumeCopyManagedTemplateAsyncWithLock(volumeInfo, destPrimaryDataStore, templateOnPrimary, + destHost, future, destDataStoreId, srcTemplateInfo.getId()); } } else { s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally"); @@ -1490,6 +1507,32 @@ public class VolumeServiceImpl implements VolumeService { return future; } + private void createManagedVolumeCopyManagedTemplateAsyncWithLock(VolumeInfo volumeInfo, PrimaryDataStore destPrimaryDataStore, TemplateInfo templateOnPrimary, + Host destHost, AsyncCallFuture future, long destDataStoreId, long srcTemplateId) { + GlobalLock lock = null; + try { + String tmplIdManagedPoolIdDestinationHostLockString = "tmplId:" + srcTemplateId + "managedPoolId:" + destDataStoreId + "destinationHostId:" + destHost.getId(); + lock = GlobalLock.getInternLock(tmplIdManagedPoolIdDestinationHostLockString); + if (lock == null) { + throw new CloudRuntimeException("Unable to create volume from template, couldn't get global lock on " + tmplIdManagedPoolIdDestinationHostLockString); + } + + int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); + if (!lock.lock(storagePoolMaxWaitSeconds)) { + s_logger.debug("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString); + throw new CloudRuntimeException("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString); + } + + s_logger.debug("Copying the template to the volume on primary storage"); + createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future); + } finally { + if (lock != null) { + 
lock.unlock(); + lock.releaseRef(); + } + } + } + private boolean computeSupportsVolumeClone(long zoneId, HypervisorType hypervisorType) { if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) { return true; diff --git a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 9363ebd2379..0306a062df9 100644 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@ -61,7 +61,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP @Override public boolean isEnabled() { if (!roleService.isEnabled()) { - LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker."); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("RoleService is disabled. 
We will not use ProjectRoleBasedApiAccessChecker."); + } } return roleService.isEnabled(); } @@ -119,7 +121,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP Account userAccount = accountService.getAccount(user.getAccountId()); if (accountService.isRootAdmin(userAccount.getId()) || accountService.isDomainAdmin(userAccount.getAccountId())) { - LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName())); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName())); + } return true; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 5d4b29a9b4f..60e6bcffeb6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -73,6 +73,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang.math.NumberUtils; +import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import org.apache.xerces.impl.xpath.regex.Match; @@ -485,6 +486,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv */ private static final String COMMAND_SET_MEM_BALLOON_STATS_PERIOD = "virsh dommemstat %s --period %s --live"; + private static int hostCpuMaxCapacity = 0; + + private static final int CGROUP_V2_UPPER_LIMIT = 10000; + + private static final String COMMAND_GET_CGROUP_HOST_VERSION = "stat -fc %T /sys/fs/cgroup/"; + + public static final String CGROUP_V2 = 
"cgroup2fs"; + protected long getHypervisorLibvirtVersion() { return hypervisorLibvirtVersion; } @@ -565,6 +574,18 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return new ExecutionResult(true, null); } + /** + * @return the host CPU max capacity according to the method {@link LibvirtComputingResource#calculateHostCpuMaxCapacity(int, Long)}; if the host utilizes cgroup v1, this + * value is 0. + */ + public int getHostCpuMaxCapacity() { + return hostCpuMaxCapacity; + } + + public void setHostCpuMaxCapacity(int hostCpuMaxCapacity) { + LibvirtComputingResource.hostCpuMaxCapacity = hostCpuMaxCapacity; + } + public LibvirtKvmAgentHook getTransformer() throws IOException { return new LibvirtKvmAgentHook(agentHooksBasedir, agentHooksLibvirtXmlScript, agentHooksLibvirtXmlMethod); } @@ -1044,7 +1065,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - enableSSLForKvmAgent(params); + enableSSLForKvmAgent(); configureLocalStorage(); /* Directory to use for Qemu sockets like for the Qemu Guest Agent */ @@ -1353,13 +1374,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - private void enableSSLForKvmAgent(final Map params) { + private void enableSSLForKvmAgent() { final File keyStoreFile = PropertiesUtil.findConfigFile(KeyStoreUtils.KS_FILENAME); if (keyStoreFile == null) { s_logger.info("Failed to find keystore file: " + KeyStoreUtils.KS_FILENAME); return; } - String keystorePass = (String)params.get(KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + String keystorePass = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KEYSTORE_PASSPHRASE); if (StringUtils.isBlank(keystorePass)) { s_logger.info("Failed to find passphrase for keystore: " + KeyStoreUtils.KS_FILENAME); return; @@ -2274,7 +2295,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return new Pair, Integer>(macAddressToNicNum, devNum); } - protected PowerState 
convertToPowerState(final DomainState ps) { + public PowerState convertToPowerState(final DomainState ps) { final PowerState state = POWER_STATES_TABLE.get(ps); return state == null ? PowerState.PowerUnknown : state; } @@ -2707,12 +2728,41 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv */ protected CpuTuneDef createCpuTuneDef(VirtualMachineTO vmTO) { CpuTuneDef ctd = new CpuTuneDef(); - int shares = vmTO.getCpus() * (vmTO.getMinSpeed() != null ? vmTO.getMinSpeed() : vmTO.getSpeed()); - ctd.setShares(shares); + ctd.setShares(calculateCpuShares(vmTO)); setQuotaAndPeriod(vmTO, ctd); return ctd; } + /** + * Calculates the VM CPU shares considering the cgroup version of the host. + *
    + *
  • + * If the host utilize cgroup v1, then, the CPU shares is calculated as VM CPU shares = CPU cores * CPU frequency. + *
  • + *
  • + * If the host utilize cgroup v2, the CPU shares calculation considers the cgroup v2 upper limit of 10,000, and a linear scale conversion is applied + * considering the maximum host CPU shares (i.e. using the number of CPU cores and CPU nominal frequency of the host). Therefore, the VM CPU shares is calculated as + * VM CPU shares = (VM requested shares * cgroup upper limit) / host max shares. + *
  • + *
+ */ + public int calculateCpuShares(VirtualMachineTO vmTO) { + int vCpus = vmTO.getCpus(); + int cpuSpeed = ObjectUtils.defaultIfNull(vmTO.getMinSpeed(), vmTO.getSpeed()); + int requestedCpuShares = vCpus * cpuSpeed; + int hostCpuMaxCapacity = getHostCpuMaxCapacity(); + + if (hostCpuMaxCapacity > 0) { + int updatedCpuShares = (int) Math.ceil((requestedCpuShares * CGROUP_V2_UPPER_LIMIT) / (double) hostCpuMaxCapacity); + s_logger.debug(String.format("This host utilizes cgroupv2 (as the max shares value is [%s]), thus, the VM requested shares of [%s] will be converted to " + + "consider the host limits; the new CPU shares value is [%s].", hostCpuMaxCapacity, requestedCpuShares, updatedCpuShares)); + return updatedCpuShares; + } + s_logger.debug(String.format("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [%s] will not be " + + "converted.", requestedCpuShares)); + return requestedCpuShares; + } + private CpuModeDef createCpuModeDef(VirtualMachineTO vmTO, int vcpus) { final CpuModeDef cmd = new CpuModeDef(); cmd.setMode(guestCpuMode); @@ -3548,8 +3598,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv @Override public StartupCommand[] initialize() { - final KVMHostInfo info = new KVMHostInfo(dom0MinMem, dom0OvercommitMem, manualCpuSpeed, dom0MinCpuCores); + calculateHostCpuMaxCapacity(info.getAllocatableCpus(), info.getCpuSpeed()); String capabilities = String.join(",", info.getCapabilities()); if (dpdkSupport) { @@ -3597,6 +3647,32 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return startupCommandsArray; } + /** + * Calculates and sets the host CPU max capacity according to the cgroup version of the host. + *
    + *
  • + * cgroup v1: the max CPU capacity for the host is set to 0. + *
  • + *
  • + * cgroup v2: the max CPU capacity for the host is the value of cpuCores * cpuSpeed. + *
  • + *
+ */ + protected void calculateHostCpuMaxCapacity(int cpuCores, Long cpuSpeed) { + String output = Script.runSimpleBashScript(COMMAND_GET_CGROUP_HOST_VERSION); + s_logger.info(String.format("Host uses control group [%s].", output)); + + if (!CGROUP_V2.equals(output)) { + s_logger.info(String.format("Setting host CPU max capacity to 0, as it uses cgroup v1.", getHostCpuMaxCapacity())); + setHostCpuMaxCapacity(0); + return; + } + + s_logger.info(String.format("Calculating the max shares of the host.")); + setHostCpuMaxCapacity(cpuCores * cpuSpeed.intValue()); + s_logger.info(String.format("The max shares of the host is [%d].", getHostCpuMaxCapacity())); + } + private StartupStorageCommand createLocalStoragePool(String localStoragePath, String localStorageUUID, StartupRoutingCommand cmd) { StartupStorageCommand sscmd = null; try { @@ -3701,7 +3777,39 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - protected List getAllVmNames(final Connect conn) { + /** + * Given a disk path on KVM host, attempts to find source host and path using mount command + * @param diskPath KVM host path for virtual disk + * @return Pair with IP of host and path + */ + public Pair getSourceHostPath(String diskPath) { + String sourceHostIp = null; + String sourcePath = null; + try { + String mountResult = Script.runSimpleBashScript("mount | grep \"" + diskPath + "\""); + s_logger.debug("Got mount result for " + diskPath + "\n\n" + mountResult); + if (StringUtils.isNotEmpty(mountResult)) { + String[] res = mountResult.strip().split(" "); + if (res[0].contains(":")) { + res = res[0].split(":"); + sourceHostIp = res[0].strip(); + sourcePath = res[1].strip(); + } else { + // Assume local storage + sourceHostIp = getPrivateIp(); + sourcePath = diskPath; + } + } + if (StringUtils.isNotEmpty(sourceHostIp) && StringUtils.isNotEmpty(sourcePath)) { + return new Pair<>(sourceHostIp, sourcePath); + } + } catch (Exception ex) { + s_logger.warn("Failed to list source 
host and IP for " + diskPath + ex.toString()); + } + return null; + } + + public List getAllVmNames(final Connect conn) { final ArrayList la = new ArrayList(); try { final String names[] = conn.listDefinedDomains(); @@ -5263,4 +5371,25 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } } + + /* + Scp volume from remote host to local directory + */ + public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath) { + try { + String outputFile = UUID.randomUUID().toString(); + StringBuilder command = new StringBuilder("qemu-img convert -O qcow2 "); + command.append(remoteFile); + command.append(" "+tmpPath); + command.append(outputFile); + s_logger.debug("Converting remoteFile: "+remoteFile); + SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString()); + s_logger.debug("Copying remoteFile to: "+localDir); + SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath+outputFile); + s_logger.debug("Successfully copyied remoteFile to: "+localDir+"/"+outputFile); + return outputFile; + } catch (Exception e) { + throw new RuntimeException(e); + } + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java index a5565c2de34..f165796adef 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java @@ -57,8 +57,10 @@ public class LibvirtDomainXMLParser { private final List channels = new ArrayList(); private final List watchDogDefs = new ArrayList(); private Integer vncPort; + private String vncPasswd; private String desc; - + private LibvirtVMDef.CpuTuneDef cpuTuneDef; + private LibvirtVMDef.CpuModeDef cpuModeDef; private String 
name; public boolean parseDomainXML(String domXML) { @@ -278,6 +280,14 @@ public class LibvirtDomainXMLParser { String name = getAttrValue("target", "name", channel); String state = getAttrValue("target", "state", channel); + if (ChannelDef.ChannelType.valueOf(type.toUpperCase()).equals(ChannelDef.ChannelType.SPICEVMC)) { + continue; + } + + if (path == null) { + path = ""; + } + ChannelDef def = null; if (StringUtils.isBlank(state)) { def = new ChannelDef(name, ChannelDef.ChannelType.valueOf(type.toUpperCase()), new File(path)); @@ -305,6 +315,12 @@ public class LibvirtDomainXMLParser { vncPort = null; } } + + String passwd = graphic.getAttribute("passwd"); + if (passwd != null) { + vncPasswd = passwd; + } + } NodeList rngs = devices.getElementsByTagName("rng"); @@ -317,6 +333,26 @@ public class LibvirtDomainXMLParser { String period = getAttrValue("rate", "period", rng); if (StringUtils.isAnyEmpty(bytes, period)) { s_logger.debug(String.format("Bytes and period in the rng section should not be null, please check the VM %s", name)); + } + + if (bytes == null) { + bytes = "0"; + } + + if (period == null) { + period = "0"; + } + + if (bytes == null) { + bytes = "0"; + } + + if (period == null) { + period = "0"; + } + + if (StringUtils.isEmpty(backendModel)) { + def = new RngDef(path, Integer.parseInt(bytes), Integer.parseInt(period)); } else { if (StringUtils.isEmpty(backendModel)) { def = new RngDef(path, Integer.parseInt(bytes), Integer.parseInt(period)); @@ -350,7 +386,8 @@ public class LibvirtDomainXMLParser { watchDogDefs.add(def); } - + extractCpuTuneDef(rootElement); + extractCpuModeDef(rootElement); return true; } catch (ParserConfigurationException e) { s_logger.debug(e.toString()); @@ -411,6 +448,10 @@ public class LibvirtDomainXMLParser { return interfaces; } + public String getVncPasswd() { + return vncPasswd; + } + public MemBalloonDef getMemBalloon() { return memBalloonDef; } @@ -438,4 +479,65 @@ public class LibvirtDomainXMLParser { public String 
getName() { return name; } + + public LibvirtVMDef.CpuTuneDef getCpuTuneDef() { + return cpuTuneDef; + } + + public LibvirtVMDef.CpuModeDef getCpuModeDef() { + return cpuModeDef; + } + + private void extractCpuTuneDef(final Element rootElement) { + NodeList cpuTunesList = rootElement.getElementsByTagName("cputune"); + if (cpuTunesList.getLength() > 0) { + cpuTuneDef = new LibvirtVMDef.CpuTuneDef(); + final Element cpuTuneDefElement = (Element) cpuTunesList.item(0); + final String cpuShares = getTagValue("shares", cpuTuneDefElement); + if (StringUtils.isNotBlank(cpuShares)) { + cpuTuneDef.setShares((Integer.parseInt(cpuShares))); + } + + final String quota = getTagValue("quota", cpuTuneDefElement); + if (StringUtils.isNotBlank(quota)) { + cpuTuneDef.setQuota((Integer.parseInt(quota))); + } + + final String period = getTagValue("period", cpuTuneDefElement); + if (StringUtils.isNotBlank(period)) { + cpuTuneDef.setPeriod((Integer.parseInt(period))); + } + } + } + + private void extractCpuModeDef(final Element rootElement){ + NodeList cpuModeList = rootElement.getElementsByTagName("cpu"); + if (cpuModeList.getLength() > 0){ + cpuModeDef = new LibvirtVMDef.CpuModeDef(); + final Element cpuModeDefElement = (Element) cpuModeList.item(0); + final String cpuModel = getTagValue("model", cpuModeDefElement); + if (StringUtils.isNotBlank(cpuModel)){ + cpuModeDef.setModel(cpuModel); + } + NodeList cpuFeatures = cpuModeDefElement.getElementsByTagName("features"); + if (cpuFeatures.getLength() > 0) { + final ArrayList features = new ArrayList<>(cpuFeatures.getLength()); + for (int i = 0; i < cpuFeatures.getLength(); i++) { + final Element feature = (Element)cpuFeatures.item(i); + final String policy = feature.getAttribute("policy"); + String featureName = feature.getAttribute("name"); + if ("disable".equals(policy)) { + featureName = "-" + featureName; + } + features.add(featureName); + } + cpuModeDef.setFeatures(features); + } + final String sockets = getAttrValue("topology", 
"sockets", cpuModeDefElement); + final String cores = getAttrValue("topology", "cores", cpuModeDefElement); + if (StringUtils.isNotBlank(sockets) && StringUtils.isNotBlank(cores)) { + cpuModeDef.setTopology(Integer.parseInt(cores), Integer.parseInt(sockets)); + } + } + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index d31a6ab38db..6b5fac0e942 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -1072,6 +1072,18 @@ public class LibvirtVMDef { public LibvirtDiskEncryptDetails getLibvirtDiskEncryptDetails() { return this.encryptDetails; } + public String getSourceHost() { + return _sourceHost; + } + + public int getSourceHostPort() { + return _sourcePort; + } + + public String getSourcePath() { + return _sourcePath; + } + @Override public String toString() { StringBuilder diskBuilder = new StringBuilder(); @@ -1737,6 +1749,10 @@ public class LibvirtVMDef { modeBuilder.append(""); return modeBuilder.toString(); } + + public int getCoresPerSocket() { + return _coresPerSocket; + } } public static class SerialDef { @@ -1793,7 +1809,7 @@ public class LibvirtVMDef { public final static class ChannelDef { enum ChannelType { - UNIX("unix"), SERIAL("serial"); + UNIX("unix"), SERIAL("serial"), SPICEVMC("spicevmc"); String type; ChannelType(String type) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java new file mode 100644 index 00000000000..8b0a5aab461 --- /dev/null +++ 
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
//

package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CheckVolumeAnswer;
import com.cloud.agent.api.CheckVolumeCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import org.libvirt.LibvirtException;

import java.util.Map;

/**
 * Handles {@link CheckVolumeCommand}: locates a volume on a file-system based
 * storage pool on this host and reports its virtual size as seen by qemu-img.
 * Only {@code Filesystem} and {@code NetworkFilesystem} pools are supported.
 */
@ResourceWrapper(handles = CheckVolumeCommand.class)
public final class LibvirtCheckVolumeCommandWrapper extends CommandWrapper<CheckVolumeCommand, Answer, LibvirtComputingResource> {

    private static final Logger s_logger = Logger.getLogger(LibvirtCheckVolumeCommandWrapper.class);

    @Override
    public Answer execute(final CheckVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
        final String srcFile = command.getSrcFile();
        final StorageFilerTO storageFilerTO = command.getStorageFilerTO();
        final KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr();
        final KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid());

        try {
            if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem ||
                    storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) {
                final KVMPhysicalDisk vol = pool.getPhysicalDisk(srcFile);
                final long size = getVirtualSizeFromFile(vol.getPath());
                return new CheckVolumeAnswer(command, "", size);
            }
            return new Answer(command, false, "Unsupported Storage Pool");
        } catch (final Exception e) {
            s_logger.error("Error while locating disk: " + e.getMessage(), e);
            // Return the actual failure reason instead of a null details string,
            // so the management server can surface a meaningful error.
            return new Answer(command, false, e.getMessage());
        }
    }

    /**
     * Returns the virtual size (in bytes) reported by qemu-img for the file at {@code path}.
     *
     * @throws CloudRuntimeException if qemu-img fails or does not report a virtual size
     */
    private long getVirtualSizeFromFile(final String path) {
        try {
            final QemuImg qemu = new QemuImg(0);
            final QemuImgFile qemuFile = new QemuImgFile(path);
            final Map<String, String> info = qemu.info(qemuFile);
            if (info.containsKey(QemuImg.VIRTUAL_SIZE)) {
                return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));
            }
            throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path);
        } catch (QemuImgException | LibvirtException ex) {
            throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
        }
    }
}
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
//

package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CopyRemoteVolumeAnswer;
import com.cloud.agent.api.CopyRemoteVolumeCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import org.libvirt.LibvirtException;

import java.util.Map;

/**
 * Handles {@link CopyRemoteVolumeCommand}: copies a volume file from a remote
 * KVM host (over SSH, using the supplied credentials) into a local file-system
 * based storage pool, then reports the copied file's name and virtual size.
 */
@ResourceWrapper(handles = CopyRemoteVolumeCommand.class)
public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper<CopyRemoteVolumeCommand, Answer, LibvirtComputingResource> {

    private static final Logger s_logger = Logger.getLogger(LibvirtCopyRemoteVolumeCommandWrapper.class);

    @Override
    public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
        final String srcIp = command.getRemoteIp();
        final String username = command.getUsername();
        final String password = command.getPassword();
        final String srcFile = command.getSrcFile();
        final StorageFilerTO storageFilerTO = command.getStorageFilerTO();
        final String tmpPath = command.getTmpPath();
        final KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr();
        final KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid());
        final String dstPath = pool.getLocalPath();

        try {
            if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem ||
                    storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) {
                final String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath);
                s_logger.debug("Volume Copy Successful");
                final KVMPhysicalDisk vol = pool.getPhysicalDisk(filename);
                final long size = getVirtualSizeFromFile(vol.getPath());
                return new CopyRemoteVolumeAnswer(command, "", filename, size);
            }
            return new Answer(command, false, "Unsupported Storage Pool");
        } catch (final Exception e) {
            s_logger.error("Error while copying file from remote host: " + e.getMessage(), e);
            // Return the actual failure reason instead of a null details string.
            return new Answer(command, false, e.getMessage());
        }
    }

    /**
     * Returns the virtual size (in bytes) reported by qemu-img for the file at {@code path}.
     *
     * @throws CloudRuntimeException if qemu-img fails or does not report a virtual size
     */
    private long getVirtualSizeFromFile(final String path) {
        try {
            final QemuImg qemu = new QemuImg(0);
            final QemuImgFile qemuFile = new QemuImgFile(path);
            final Map<String, String> info = qemu.info(qemuFile);
            if (info.containsKey(QemuImg.VIRTUAL_SIZE)) {
                return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));
            }
            throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path);
        } catch (QemuImgException | LibvirtException ex) {
            throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
        }
    }
}
//

package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.GetRemoteVmsAnswer;
import com.cloud.agent.api.GetRemoteVmsCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainBlockInfo;
import org.libvirt.DomainInfo;
import org.libvirt.LibvirtException;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

/**
 * Handles {@link GetRemoteVmsCommand}: connects to a remote libvirt daemon
 * (qemu+tcp) and collects details of the stopped (PowerOff) domains on that
 * host as {@link UnmanagedInstanceTO}s, for import into CloudStack.
 */
@ResourceWrapper(handles = GetRemoteVmsCommand.class)
public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper<GetRemoteVmsCommand, Answer, LibvirtComputingResource> {

    private static final Logger s_logger = Logger.getLogger(LibvirtGetRemoteVmsCommandWrapper.class);

    @Override
    public Answer execute(final GetRemoteVmsCommand command, final LibvirtComputingResource libvirtComputingResource) {
        final String hypervisorURI = "qemu+tcp://" + command.getRemoteIp() + "/system";
        final HashMap<String, UnmanagedInstanceTO> unmanagedInstances = new HashMap<>();
        try {
            final Connect conn = LibvirtConnection.getConnection(hypervisorURI);
            final List<String> allVmNames = libvirtComputingResource.getAllVmNames(conn);
            for (final String name : allVmNames) {
                final Domain domain = libvirtComputingResource.getDomain(conn, name);

                final DomainInfo.DomainState ps = domain.getInfo().state;
                final VirtualMachine.PowerState state = libvirtComputingResource.convertToPowerState(ps);

                s_logger.debug("VM " + domain.getName() + ": powerstate = " + ps + "; vm state=" + state.toString());

                // Only stopped VMs can be safely copied off the remote host.
                if (state == VirtualMachine.PowerState.PowerOff) {
                    try {
                        final UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn);
                        unmanagedInstances.put(instance.getName(), instance);
                    } catch (Exception e) {
                        s_logger.error("Error while fetching instance details", e);
                    }
                }
                domain.free();
            }
            s_logger.debug("Found Vms: " + unmanagedInstances.size());
            return new GetRemoteVmsAnswer(command, "", unmanagedInstances);
        } catch (final LibvirtException e) {
            s_logger.error("Error while listing stopped Vms on remote host: " + e.getMessage(), e);
            // Return the actual failure reason instead of a null details string.
            return new Answer(command, false, e.getMessage());
        }
    }

    /**
     * Builds an {@link UnmanagedInstanceTO} from the domain's XML description
     * (CPU, memory, NICs, disks, VNC password).
     */
    private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvirtComputingResource, Domain domain, Connect conn) {
        try {
            final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
            parser.parseDomainXML(domain.getXMLDesc(1));

            final UnmanagedInstanceTO instance = new UnmanagedInstanceTO();
            instance.setName(domain.getName());
            if (parser.getCpuModeDef() != null) {
                instance.setCpuCoresPerSocket(parser.getCpuModeDef().getCoresPerSocket());
            }
            final Long memory = domain.getMaxMemory();
            // libvirt reports memory in KiB; CloudStack expects MiB.
            instance.setMemory(memory.intValue() / 1024);
            if (parser.getCpuTuneDef() != null) {
                instance.setCpuSpeed(parser.getCpuTuneDef().getShares());
            }
            instance.setPowerState(getPowerState(libvirtComputingResource.getVmState(conn, domain.getName())));
            instance.setNics(getUnmanagedInstanceNics(parser.getInterfaces()));
            instance.setDisks(getUnmanagedInstanceDisks(parser.getDisks(), libvirtComputingResource, domain));
            instance.setVncPassword(parser.getVncPasswd() + "aaaaaaaaaaaaaa"); // Suffix back extra characters for DB compatibility

            return instance;
        } catch (Exception e) {
            s_logger.debug("Unable to retrieve unmanaged instance info. ", e);
            // Preserve the original exception as the cause instead of only its message.
            throw new CloudRuntimeException("Unable to retrieve unmanaged instance info. " + e.getMessage(), e);
        }
    }

    /** Maps a CloudStack VM power state onto the unmanaged-instance power state. */
    private UnmanagedInstanceTO.PowerState getPowerState(VirtualMachine.PowerState vmPowerState) {
        switch (vmPowerState) {
            case PowerOn:
                return UnmanagedInstanceTO.PowerState.PowerOn;
            case PowerOff:
                return UnmanagedInstanceTO.PowerState.PowerOff;
            default:
                return UnmanagedInstanceTO.PowerState.PowerUnknown;
        }
    }

    /** Converts the parsed libvirt interface definitions into NIC transfer objects. */
    private List<UnmanagedInstanceTO.Nic> getUnmanagedInstanceNics(List<LibvirtVMDef.InterfaceDef> interfaces) {
        final ArrayList<UnmanagedInstanceTO.Nic> nics = new ArrayList<>(interfaces.size());
        int counter = 0;
        for (final LibvirtVMDef.InterfaceDef interfaceDef : interfaces) {
            final UnmanagedInstanceTO.Nic nic = new UnmanagedInstanceTO.Nic();
            nic.setNicId(String.valueOf(counter++));
            nic.setMacAddress(interfaceDef.getMacAddress());
            nic.setAdapterType(interfaceDef.getModel().toString());
            nic.setNetwork(interfaceDef.getDevName());
            nic.setPciSlot(interfaceDef.getSlot().toString());
            nic.setVlan(interfaceDef.getVlanTag());
            nics.add(nic);
        }
        return nics;
    }

    /**
     * Converts the parsed libvirt disk definitions into disk transfer objects,
     * skipping non-DISK devices (e.g. CDROM). Disk capacity is read via
     * {@code Domain.blockInfo} on the remote connection.
     */
    private List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<LibvirtVMDef.DiskDef> disksInfo,
                                                                    LibvirtComputingResource libvirtComputingResource,
                                                                    Domain dm) {
        final ArrayList<UnmanagedInstanceTO.Disk> disks = new ArrayList<>(disksInfo.size());
        int counter = 0;
        for (final LibvirtVMDef.DiskDef diskDef : disksInfo) {
            if (diskDef.getDeviceType() != LibvirtVMDef.DiskDef.DeviceType.DISK) {
                continue;
            }

            final UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk();
            disk.setPosition(counter);

            final Long size;
            try {
                final DomainBlockInfo blockInfo = dm.blockInfo(diskDef.getSourcePath());
                size = blockInfo.getCapacity();
            } catch (LibvirtException e) {
                // Keep context (which disk failed) and the original cause.
                throw new CloudRuntimeException("Unable to determine capacity of disk at path " + diskDef.getSourcePath(), e);
            }

            disk.setCapacity(size);
            disk.setDiskId(String.valueOf(counter++));
            disk.setLabel(diskDef.getDiskLabel());
            disk.setController(diskDef.getBusType().toString());

            final Pair<String, String> sourceHostPath = getSourceHostPath(libvirtComputingResource, diskDef.getSourcePath());
            if (sourceHostPath != null) {
                disk.setDatastoreHost(sourceHostPath.first());
                disk.setDatastorePath(sourceHostPath.second());
            } else {
                disk.setDatastorePath(diskDef.getSourcePath());
                disk.setDatastoreHost(diskDef.getSourceHost());
            }

            disk.setDatastoreType(diskDef.getDiskType().toString());
            disk.setDatastorePort(diskDef.getSourceHostPort());
            disks.add(disk);
        }
        return disks;
    }

    /**
     * Resolves the NFS host/path pair for the directory containing
     * {@code diskPath}, or null when the path has no directory component.
     */
    private Pair<String, String> getSourceHostPath(LibvirtComputingResource libvirtComputingResource, String diskPath) {
        final int pathEnd = diskPath.lastIndexOf("/");
        if (pathEnd >= 0) {
            diskPath = diskPath.substring(0, pathEnd);
            return libvirtComputingResource.getSourceHostPath(diskPath);
        }
        return null;
    }
}

package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.GetUnmanagedInstancesAnswer;
import com.cloud.agent.api.GetUnmanagedInstancesCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.LibvirtException;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Handles {@link GetUnmanagedInstancesCommand}: lists domains on this host
 * that are not yet managed by CloudStack (optionally restricted to a single
 * instance name) and describes them as {@link UnmanagedInstanceTO}s.
 */
@ResourceWrapper(handles = GetUnmanagedInstancesCommand.class)
public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWrapper<GetUnmanagedInstancesCommand, GetUnmanagedInstancesAnswer, LibvirtComputingResource> {
    private static final Logger LOGGER = Logger.getLogger(LibvirtGetUnmanagedInstancesCommandWrapper.class);

    @Override
    public GetUnmanagedInstancesAnswer execute(GetUnmanagedInstancesCommand command, LibvirtComputingResource libvirtComputingResource) {
        LOGGER.info("Fetching unmanaged instance on host");

        final HashMap<String, UnmanagedInstanceTO> unmanagedInstances = new HashMap<>();
        try {
            final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
            final Connect conn = libvirtUtilitiesHelper.getConnection();
            final List<Domain> domains = getDomains(command, libvirtComputingResource, conn);

            for (final Domain domain : domains) {
                try {
                    final UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn);
                    if (instance != null) {
                        unmanagedInstances.put(instance.getName(), instance);
                    }
                } finally {
                    // Always release the libvirt domain handle, even when the
                    // instance could not be described (avoids a native leak).
                    domain.free();
                }
            }
        } catch (Exception e) {
            final String err = String.format("Error listing unmanaged instances: %s", e.getMessage());
            LOGGER.error(err, e);
            return new GetUnmanagedInstancesAnswer(command, err);
        }

        return new GetUnmanagedInstancesAnswer(command, "OK", unmanagedInstances);
    }

    /**
     * Returns the domains to inspect: the single named domain (which must
     * exist and must not already be managed), or every unmanaged domain on
     * the host when no name was given.
     */
    private List<Domain> getDomains(GetUnmanagedInstancesCommand command,
                                    LibvirtComputingResource libvirtComputingResource,
                                    Connect conn) throws LibvirtException, CloudRuntimeException {
        final List<Domain> domains = new ArrayList<>();
        final String vmNameCmd = command.getInstanceName();
        if (StringUtils.isNotBlank(vmNameCmd)) {
            final Domain domain = libvirtComputingResource.getDomain(conn, vmNameCmd);
            if (domain == null) {
                final String msg = String.format("VM %s not found", vmNameCmd);
                LOGGER.error(msg);
                throw new CloudRuntimeException(msg);
            }

            checkIfVmExists(vmNameCmd, domain);
            checkIfVmIsManaged(command, vmNameCmd, domain);

            domains.add(domain);
        } else {
            final List<String> allVmNames = libvirtComputingResource.getAllVmNames(conn);
            for (final String name : allVmNames) {
                if (!command.hasManagedInstance(name)) {
                    final Domain domain = libvirtComputingResource.getDomain(conn, name);
                    domains.add(domain);
                }
            }
        }
        return domains;
    }

    /** Rejects a domain whose actual name does not match the requested one. */
    private void checkIfVmExists(String vmNameCmd, final Domain domain) throws LibvirtException {
        if (StringUtils.isNotEmpty(vmNameCmd) &&
                !vmNameCmd.equals(domain.getName())) {
            LOGGER.error("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd);
            throw new CloudRuntimeException("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd);
        }
    }

    /** Rejects a domain that CloudStack already manages. */
    private void checkIfVmIsManaged(GetUnmanagedInstancesCommand command, String vmNameCmd, final Domain domain) throws LibvirtException {
        if (command.hasManagedInstance(domain.getName())) {
            LOGGER.error("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd);
            throw new CloudRuntimeException("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd);
        }
    }

    /**
     * Builds an {@link UnmanagedInstanceTO} from the domain's XML description.
     * Returns null (and logs) when the domain cannot be described.
     */
    private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvirtComputingResource, Domain domain, Connect conn) {
        try {
            final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
            parser.parseDomainXML(domain.getXMLDesc(1));

            final UnmanagedInstanceTO instance = new UnmanagedInstanceTO();
            instance.setName(domain.getName());

            instance.setCpuCores((int) LibvirtComputingResource.countDomainRunningVcpus(domain));
            // A domain without a <cputune> element has no shares value, and a
            // stopped domain may report zero running vCPUs: guard both before
            // deriving the per-core speed.
            if (parser.getCpuTuneDef() != null && instance.getCpuCores() > 0) {
                instance.setCpuSpeed(parser.getCpuTuneDef().getShares() / instance.getCpuCores());
            }

            if (parser.getCpuModeDef() != null) {
                instance.setCpuCoresPerSocket(parser.getCpuModeDef().getCoresPerSocket());
            }
            instance.setPowerState(getPowerState(libvirtComputingResource.getVmState(conn, domain.getName())));
            // libvirt reports memory in KiB; CloudStack expects MiB.
            instance.setMemory((int) LibvirtComputingResource.getDomainMemory(domain) / 1024);
            instance.setNics(getUnmanagedInstanceNics(parser.getInterfaces()));
            instance.setDisks(getUnmanagedInstanceDisks(parser.getDisks(), libvirtComputingResource));
            instance.setVncPassword(parser.getVncPasswd() + "aaaaaaaaaaaaaa"); // Suffix back extra characters for DB compatibility

            return instance;
        } catch (Exception e) {
            LOGGER.info("Unable to retrieve unmanaged instance info. " + e.getMessage(), e);
            return null;
        }
    }

    /** Maps a CloudStack VM power state onto the unmanaged-instance power state. */
    private UnmanagedInstanceTO.PowerState getPowerState(VirtualMachine.PowerState vmPowerState) {
        switch (vmPowerState) {
            case PowerOn:
                return UnmanagedInstanceTO.PowerState.PowerOn;
            case PowerOff:
                return UnmanagedInstanceTO.PowerState.PowerOff;
            default:
                return UnmanagedInstanceTO.PowerState.PowerUnknown;
        }
    }

    /** Converts the parsed libvirt interface definitions into NIC transfer objects. */
    private List<UnmanagedInstanceTO.Nic> getUnmanagedInstanceNics(List<LibvirtVMDef.InterfaceDef> interfaces) {
        final ArrayList<UnmanagedInstanceTO.Nic> nics = new ArrayList<>(interfaces.size());
        int counter = 0;
        for (final LibvirtVMDef.InterfaceDef interfaceDef : interfaces) {
            final UnmanagedInstanceTO.Nic nic = new UnmanagedInstanceTO.Nic();
            nic.setNicId(String.valueOf(counter++));
            nic.setMacAddress(interfaceDef.getMacAddress());
            nic.setAdapterType(interfaceDef.getModel().toString());
            nic.setNetwork(interfaceDef.getDevName());
            nic.setPciSlot(interfaceDef.getSlot().toString());
            nic.setVlan(interfaceDef.getVlanTag());
            nics.add(nic);
        }
        return nics;
    }

    /**
     * Converts the parsed libvirt disk definitions into disk transfer objects,
     * skipping non-DISK devices. Size and backing image path are read via
     * qemu-img.
     */
    private List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<LibvirtVMDef.DiskDef> disksInfo, LibvirtComputingResource libvirtComputingResource) {
        final ArrayList<UnmanagedInstanceTO.Disk> disks = new ArrayList<>(disksInfo.size());
        int counter = 0;
        for (final LibvirtVMDef.DiskDef diskDef : disksInfo) {
            if (diskDef.getDeviceType() != LibvirtVMDef.DiskDef.DeviceType.DISK) {
                continue;
            }

            final UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk();
            final Long size;
            final String imagePath;
            try {
                final QemuImgFile file = new QemuImgFile(diskDef.getSourcePath());
                final QemuImg qemu = new QemuImg(0);
                final Map<String, String> info = qemu.info(file);
                size = Long.parseLong(info.getOrDefault("virtual_size", "0"));
                imagePath = info.getOrDefault("image", null);
            } catch (QemuImgException | LibvirtException e) {
                // Keep context (which disk failed) and the original cause.
                throw new CloudRuntimeException("Unable to inspect disk at path " + diskDef.getSourcePath(), e);
            }

            disk.setPosition(counter);
            disk.setCapacity(size);
            disk.setDiskId(String.valueOf(counter++));
            disk.setLabel(diskDef.getDiskLabel());
            disk.setController(diskDef.getBusType().toString());

            final Pair<String, String> sourceHostPath = getSourceHostPath(libvirtComputingResource, diskDef.getSourcePath());
            if (sourceHostPath != null) {
                disk.setDatastoreHost(sourceHostPath.first());
                disk.setDatastorePath(sourceHostPath.second());
            } else {
                disk.setDatastorePath(diskDef.getSourcePath());
                disk.setDatastoreHost(diskDef.getSourceHost());
            }

            disk.setDatastoreType(diskDef.getDiskType().toString());
            disk.setDatastorePort(diskDef.getSourceHostPort());
            disk.setImagePath(imagePath);
            // qemu-img may not report an "image" entry; guard against the null
            // default before deriving the datastore name from it.
            if (imagePath != null) {
                disk.setDatastoreName(imagePath.substring(imagePath.lastIndexOf("/")));
            }
            disks.add(disk);
        }
        return disks;
    }

    /**
     * Resolves the NFS host/path pair for the directory containing
     * {@code diskPath}, or null when the path has no directory component.
     */
    private Pair<String, String> getSourceHostPath(LibvirtComputingResource libvirtComputingResource, String diskPath) {
        final int pathEnd = diskPath.lastIndexOf("/");
        if (pathEnd >= 0) {
            diskPath = diskPath.substring(0, pathEnd);
            return libvirtComputingResource.getSourceHostPath(diskPath);
        }
        return null;
    }
}
  • + * If both hosts utilize cgroup v1; then, the shares value of the VM is equal in both hosts, and there is no need to update the VM CPU shares value for the + * migration.
  • + *
  • + * If, at least, one of the hosts utilize cgroup v2, the VM CPU shares must be recalculated for the migration, accordingly to + * method {@link LibvirtComputingResource#calculateCpuShares(VirtualMachineTO)}. + *
  • + * + */ + protected String updateVmSharesIfNeeded(MigrateCommand migrateCommand, String xmlDesc, LibvirtComputingResource libvirtComputingResource) + throws ParserConfigurationException, IOException, SAXException, TransformerException { + Integer newVmCpuShares = migrateCommand.getNewVmCpuShares(); + int currentCpuShares = libvirtComputingResource.calculateCpuShares(migrateCommand.getVirtualMachine()); + + if (newVmCpuShares == currentCpuShares) { + s_logger.info(String.format("Current CPU shares [%s] is equal in both hosts; therefore, there is no need to update the CPU shares for the new host.", + currentCpuShares)); + return xmlDesc; + } + + InputStream inputStream = IOUtils.toInputStream(xmlDesc, StandardCharsets.UTF_8); + DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory(); + DocumentBuilder docBuilder = docFactory.newDocumentBuilder(); + Document document = docBuilder.parse(inputStream); + + Element root = document.getDocumentElement(); + Node sharesNode = root.getElementsByTagName("shares").item(0); + String currentShares = sharesNode.getTextContent(); + + s_logger.info(String.format("VM [%s] will have CPU shares altered from [%s] to [%s] as part of migration because the cgroups version differs between hosts.", + migrateCommand.getVmName(), currentShares, newVmCpuShares)); + sharesNode.setTextContent(String.valueOf(newVmCpuShares)); + return getXml(document); + } + /** * Replace DPDK source path and target before migrations */ diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java index 5c893e5d12f..2a09c340891 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java @@ -279,6 +279,10 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper srcDetails = command.getSrcDetails(); String srcPath = srcDetails != null ? srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath(); + // its possible a volume has details but is not using IQN addressing... + if (srcPath == null) { + srcPath = srcVolumeObjectTO.getPath(); + } VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData(); PrimaryDataStoreTO destPrimaryDataStore = (PrimaryDataStoreTO)destVolumeObjectTO.getDataStore(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java index ec9e67e894c..6292ca71c2e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java @@ -125,11 +125,7 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp return new PrepareForMigrationAnswer(command, "failed to connect physical disks to host"); } - PrepareForMigrationAnswer answer = new PrepareForMigrationAnswer(command); - if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { - answer.setDpdkInterfaceMapping(dpdkInterfaceMapping); - } - return answer; + return createPrepareForMigrationAnswer(command, dpdkInterfaceMapping, libvirtComputingResource, vm); } catch (final LibvirtException | CloudRuntimeException | InternalErrorException | URISyntaxException e) { if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { for (DpdkTO to : dpdkInterfaceMapping.values()) { @@ -146,6 +142,22 @@ public final class 
LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp } } + protected PrepareForMigrationAnswer createPrepareForMigrationAnswer(PrepareForMigrationCommand command, Map dpdkInterfaceMapping, + LibvirtComputingResource libvirtComputingResource, VirtualMachineTO vm) { + PrepareForMigrationAnswer answer = new PrepareForMigrationAnswer(command); + + if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { + s_logger.debug(String.format("Setting DPDK interface for the migration of VM [%s].", vm)); + answer.setDpdkInterfaceMapping(dpdkInterfaceMapping); + } + + int newCpuShares = libvirtComputingResource.calculateCpuShares(vm); + s_logger.debug(String.format("Setting CPU shares to [%s] for the migration of VM [%s].", newCpuShares, vm)); + answer.setNewVmCpuShares(newCpuShares); + + return answer; + } + private Answer handleRollback(PrepareForMigrationCommand command, LibvirtComputingResource libvirtComputingResource) { KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); VirtualMachineTO vmTO = command.getVirtualMachine(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java new file mode 100644 index 00000000000..68373089038 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer; +import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import org.apache.log4j.Logger; +import org.libvirt.Connect; +import org.libvirt.Domain; + +@ResourceWrapper(handles=PrepareUnmanageVMInstanceCommand.class) +public final class LibvirtPrepareUnmanageVMInstanceCommandWrapper extends CommandWrapper { + private static final Logger LOGGER = Logger.getLogger(LibvirtPrepareUnmanageVMInstanceCommandWrapper.class); + @Override + public PrepareUnmanageVMInstanceAnswer execute(PrepareUnmanageVMInstanceCommand command, LibvirtComputingResource libvirtComputingResource) { + final String vmName = command.getInstanceName(); + final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper(); + LOGGER.debug(String.format("Verify if KVM instance: [%s] is available before Unmanaging VM.", vmName)); + try { + final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName); + final Domain domain = libvirtComputingResource.getDomain(conn, vmName); + if (domain == null) { + LOGGER.error("Prepare Unmanage VMInstanceCommand: vm not found " + vmName); + new PrepareUnmanageVMInstanceAnswer(command, false, String.format("Cannot find VM with name [%s] in KVM host.", vmName)); + } + } catch (Exception e){ + 
LOGGER.error("PrepareUnmanagedInstancesCommand failed due to " + e.getMessage()); + return new PrepareUnmanageVMInstanceAnswer(command, false, "Error: " + e.getMessage()); + } + + return new PrepareUnmanageVMInstanceAnswer(command, true, "OK"); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java index 36ff69d83af..4f1ad728b5d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java @@ -50,6 +50,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; import com.cloud.hypervisor.kvm.storage.KVMStoragePool; import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.hypervisor.kvm.storage.MultipathSCSIPool; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Storage.StoragePoolType; @@ -84,6 +85,10 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper; connid= + String type = null; + String address = null; + String connectionId = null; + String path = null; + String[] parts = inPath.split(";"); + // handle initial code of wwn only + if (parts.length == 1) { + type = "FIBERWWN"; + address = parts[0]; + } else { + for (String part: parts) { + String[] pair = part.split("="); + if (pair.length == 2) { + String key = pair[0].trim(); + String value = pair[1].trim(); + if (key.equals("type")) { + type = value.toUpperCase(); + } else if (key.equals("address")) { + address = value; + } else if (key.equals("connid")) { + connectionId = value; + } + } + } + } + + if ("FIBERWWN".equals(type)) { + path = 
"/dev/mapper/3" + address; + } else { + throw new CloudRuntimeException("Invalid address type provided for target disk: " + type); + } + + return new AddressInfo(type, address, connectionId, path); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index dd31025d35f..1be4a8b6185 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -290,9 +290,12 @@ public class KVMStorageProcessor implements StorageProcessor { final TemplateObjectTO newTemplate = new TemplateObjectTO(); newTemplate.setPath(primaryVol.getName()); newTemplate.setSize(primaryVol.getSize()); - if (primaryPool.getType() == StoragePoolType.RBD || - primaryPool.getType() == StoragePoolType.PowerFlex || - primaryPool.getType() == StoragePoolType.Linstor) { + + if(List.of( + StoragePoolType.RBD, + StoragePoolType.PowerFlex, + StoragePoolType.Linstor, + StoragePoolType.FiberChannel).contains(primaryPool.getType())) { newTemplate.setFormat(ImageFormat.RAW); } else { newTemplate.setFormat(ImageFormat.QCOW2); @@ -584,7 +587,9 @@ public class KVMStorageProcessor implements StorageProcessor { public Answer createTemplateFromVolume(final CopyCommand cmd) { Map details = cmd.getOptions(); - if (details != null && details.get(DiskTO.IQN) != null) { + // handle cases where the managed storage driver had to make a temporary volume from + // the snapshot in order to support the copy + if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) { // use the managed-storage approach return createTemplateFromVolumeOrSnapshot(cmd); } @@ -712,7 +717,7 @@ public class KVMStorageProcessor implements StorageProcessor { public Answer createTemplateFromSnapshot(CopyCommand cmd) 
{ Map details = cmd.getOptions(); - if (details != null && details.get(DiskTO.IQN) != null) { + if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) { // use the managed-storage approach return createTemplateFromVolumeOrSnapshot(cmd); } @@ -750,12 +755,15 @@ public class KVMStorageProcessor implements StorageProcessor { KVMStoragePool secondaryStorage = null; try { + // look for options indicating an overridden path or IQN. Used when snapshots have to be + // temporarily copied on the manaaged storage device before the actual copy to target object Map details = cmd.getOptions(); - - String path = details != null ? details.get(DiskTO.IQN) : null; - + String path = details != null ? details.get(DiskTO.PATH) : null; if (path == null) { - new CloudRuntimeException("The 'path' field must be specified."); + path = details != null ? details.get(DiskTO.IQN) : null; + if (path == null) { + new CloudRuntimeException("The 'path' or 'iqn' field must be specified."); + } } storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details); @@ -2188,7 +2196,16 @@ public class KVMStorageProcessor implements StorageProcessor { Map details = cmd.getOptions2(); - String path = details != null ? details.get(DiskTO.IQN) : null; + String path = cmd.getDestTO().getPath(); + if (path == null) { + path = details != null ? details.get(DiskTO.PATH) : null; + if (path == null) { + path = details != null ? 
details.get(DiskTO.IQN) : null; + if (path == null) { + new CloudRuntimeException("The 'path' or 'iqn' field must be specified."); + } + } + } storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path, details); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java new file mode 100644 index 00000000000..06dea46a98d --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -0,0 +1,758 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.kvm.storage; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.log4j.Logger; + +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.OutputInterpreter; +import com.cloud.utils.script.Script; +import org.apache.commons.lang3.StringUtils; +import org.libvirt.LibvirtException; +import org.joda.time.Duration; + +public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { + static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class); + static final Map MapStorageUuidToStoragePool = new HashMap<>(); + + /** + * A lock to avoid any possiblity of multiple requests for a scan + */ + static byte[] CLEANUP_LOCK = new byte[0]; + + /** + * Property keys and defaults + */ + static final Property CLEANUP_FREQUENCY_SECS = new Property("multimap.cleanup.frequency.secs", 60); + static final Property CLEANUP_TIMEOUT_SECS = new Property("multimap.cleanup.timeout.secs", 4); + static final Property CLEANUP_ENABLED = new Property("multimap.cleanup.enabled", true); + static final Property CLEANUP_SCRIPT = new Property("multimap.cleanup.script", "cleanStaleMaps.sh"); + static final Property CONNECT_SCRIPT = new Property("multimap.connect.script", "connectVolume.sh"); + static final Property COPY_SCRIPT = new Property("multimap.copy.script", 
"copyVolume.sh"); + static final Property DISCONNECT_SCRIPT = new Property("multimap.disconnect.script", "disconnectVolume.sh"); + static final Property RESIZE_SCRIPT = new Property("multimap.resize.script", "resizeVolume.sh"); + static final Property DISK_WAIT_SECS = new Property("multimap.disk.wait.secs", 240); + static final Property STORAGE_SCRIPTS_DIR = new Property("multimap.storage.scripts.dir", "scripts/storage/multipath"); + + static Timer cleanupTimer = new Timer(); + private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue(); + private static String connectScript = CONNECT_SCRIPT.getFinalValue(); + private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue(); + private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue(); + private static String resizeScript = RESIZE_SCRIPT.getFinalValue(); + private static String copyScript = COPY_SCRIPT.getFinalValue(); + private static int diskWaitTimeSecs = DISK_WAIT_SECS.getFinalValue(); + + /** + * Initialize static program-wide configurations and background jobs + */ + static { + long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000; + boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue(); + + + connectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), connectScript); + if (connectScript == null) { + throw new Error("Unable to find the connectVolume.sh script"); + } + + disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript); + if (disconnectScript == null) { + throw new Error("Unable to find the disconnectVolume.sh script"); + } + + resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); + if (resizeScript == null) { + throw new Error("Unable to find the resizeVolume.sh script"); + } + + copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript); + if (copyScript == null) { + throw new Error("Unable to find the copyVolume.sh script"); + } + + if 
(cleanupEnabled) { + cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript); + if (cleanupScript == null) { + throw new Error("Unable to find the cleanStaleMaps.sh script and " + CLEANUP_ENABLED.getName() + " is true"); + } + + TimerTask task = new TimerTask() { + @Override + public void run() { + try { + MultipathSCSIAdapterBase.cleanupStaleMaps(); + } catch (Throwable e) { + LOGGER.warn("Error running stale multipath map cleanup", e); + } + } + }; + + cleanupTimer = new Timer("MultipathMapCleanupJob"); + cleanupTimer.scheduleAtFixedRate(task, 0, cleanupFrequency); + } + } + + @Override + public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { + return getStoragePool(uuid); + } + + public abstract String getName(); + + public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type); + + /** + * We expect WWN values in the volumePath so need to convert it to an actual physical path + */ + public abstract AddressInfo parseAndValidatePath(String path); + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { + LOGGER.debug(String.format("getPhysicalDisk(volumePath,pool) called with args (%s,%s)", volumePath, pool)); + + if (StringUtils.isEmpty(volumePath) || pool == null) { + LOGGER.error("Unable to get physical disk, volume path or pool not specified"); + return null; + } + + AddressInfo address = parseAndValidatePath(volumePath); + return getPhysicalDisk(address, pool); + } + + private KVMPhysicalDisk getPhysicalDisk(AddressInfo address, KVMStoragePool pool) { + LOGGER.debug(String.format("getPhysicalDisk(addressInfo,pool) called with args (%s,%s)", address.getPath(), pool)); + KVMPhysicalDisk disk = new KVMPhysicalDisk(address.getPath(), address.toString(), pool); + disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + + long diskSize = getPhysicalDiskSize(address.getPath()); + disk.setSize(diskSize); + disk.setVirtualSize(diskSize); + LOGGER.debug("Physical disk " 
+ disk.getPath() + " with format " + disk.getFormat() + " and size " + disk.getSize() + " provided"); + return disk; + } + + @Override + public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type, Map details) { + LOGGER.info(String.format("createStoragePool(uuid,host,port,path,type) called with args (%s, %s, %s, %s, %s)", uuid, host, ""+port, path, type)); + MultipathSCSIPool storagePool = new MultipathSCSIPool(uuid, host, port, path, type, details, this); + MapStorageUuidToStoragePool.put(uuid, storagePool); + return storagePool; + } + + @Override + public boolean deleteStoragePool(String uuid) { + return MapStorageUuidToStoragePool.remove(uuid) != null; + } + + @Override + public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) { + LOGGER.info("connectPhysicalDisk called for [" + volumePath + "]"); + + if (StringUtils.isEmpty(volumePath)) { + LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined"); + throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - volume path is underfined"); + } + + if (pool == null) { + LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set"); + throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set"); + } + + AddressInfo address = this.parseAndValidatePath(volumePath); + int waitTimeInSec = diskWaitTimeSecs; + if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) { + String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString()); + if (StringUtils.isNotEmpty(waitTime)) { + waitTimeInSec = Integer.valueOf(waitTime).intValue(); + } + } + return waitForDiskToBecomeAvailable(address, pool, waitTimeInSec); + } + + @Override + public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) { + 
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid())); + AddressInfo address = this.parseAndValidatePath(volumePath); + ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase()); + if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true; + } + + @Override + public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect); + return false; + } + + @Override + public boolean disconnectPhysicalDiskByPath(String localPath) { + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath)); + ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", "")); + if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true; + } + + @Override + public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { + LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString())); + return true; + } + + @Override + public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool) { + LOGGER.info(String.format("createTemplateFromDisk(disk,name,format,size,destPool) called with args (%s, %s, %s, %s, %s) [not implemented]", 
disk.getPath(), name, format.toString(), ""+size, destPool.getUuid())); + return null; + } + + @Override + public List listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) { + LOGGER.info(String.format("listPhysicalDisks(uuid,pool) called with args (%s, %s) [not implemented]", storagePoolUuid, pool.getUuid())); + return null; + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + return copyPhysicalDisk(disk, name, destPool, timeout, null, null, null); + } + + @Override + public boolean refresh(KVMStoragePool pool) { + LOGGER.info(String.format("refresh(pool) called with args (%s)", pool.getUuid())); + return true; + } + + @Override + public boolean deleteStoragePool(KVMStoragePool pool) { + LOGGER.info(String.format("deleteStroagePool(pool) called with args (%s)", pool.getUuid())); + return deleteStoragePool(pool.getUuid()); + } + + @Override + public boolean createFolder(String uuid, String path) { + LOGGER.info(String.format("createFolder(uuid,path) called with args (%s, %s) [not implemented]", uuid, path)); + return createFolder(uuid, path, null); + } + + @Override + public boolean createFolder(String uuid, String path, String localPath) { + LOGGER.info(String.format("createFolder(uuid,path,localPath) called with args (%s, %s, %s) [not implemented]", uuid, path, localPath)); + return true; + } + + /** + * Validate inputs and return the source file for a template copy + * @param templateFilePath + * @param destTemplatePath + * @param destPool + * @param format + * @return + */ + File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) { + if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) { + LOGGER.error("Unable to create template from direct download template file due to insufficient data"); + throw new CloudRuntimeException("Unable to create 
template from direct download template file due to insufficient data"); + } + + LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + + File sourceFile = new File(templateFilePath); + if (!sourceFile.exists()) { + throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host"); + } + + if (destTemplatePath == null || destTemplatePath.isEmpty()) { + LOGGER.error("Failed to create template, target template disk path not provided"); + throw new CloudRuntimeException("Target template disk path not provided"); + } + + if (this.isStoragePoolTypeSupported(destPool.getType())) { + throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString()); + } + + if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) { + LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); + throw new CloudRuntimeException("Unsupported template format: " + format.toString()); + } + return sourceFile; + } + + String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) { + String srcTemplateFilePath = templateFilePath; + if (isTemplateExtractable(templateFilePath)) { + srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); + LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); + String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); + Script.runSimpleBashScript(extractCommand); + Script.runSimpleBashScript("rm -f " + templateFilePath); + } + return srcTemplateFilePath; + } + + QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) { + if (format == Storage.ImageFormat.RAW) { + return QemuImg.PhysicalDiskFormat.RAW; + } else if (format == Storage.ImageFormat.QCOW2) { + 
return QemuImg.PhysicalDiskFormat.QCOW2; + } else { + return QemuImg.PhysicalDiskFormat.RAW; + } + } + + @Override + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { + File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format); + LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath()); + return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN); + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, + byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) { + + validateForDiskCopy(disk, name, destPool); + LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); + + KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); + if (destDisk == null) { + LOGGER.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + throw new CloudRuntimeException("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + } + + if (srcPassphrase != null || dstPassphrase != null) { + throw new CloudRuntimeException("Storage provider does not support user-space encrypted source or destination volumes"); + } + + destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + destDisk.setVirtualSize(disk.getVirtualSize()); + destDisk.setSize(disk.getSize()); + + LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", 
virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat()); + QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); + QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath()); + ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); + int rc = result.getExitCode(); + if (rc != 0) { + throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult()); + } + LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult()); + + return destDisk; + } + + void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) { + if (StringUtils.isEmpty(name) || disk == null || destPool == null) { + LOGGER.error("Unable to copy physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); + } + } + + /** + * Copy a disk path to another disk path using QemuImg command + * @param disk + * @param destDisk + * @param name + * @param timeout + */ + void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) { + QemuImg qemu; + try { + qemu = new QemuImg(timeout); + } catch (LibvirtException | QemuImgException e) { + throw new CloudRuntimeException (e); + } + QemuImgFile srcFile = null; + QemuImgFile destFile = null; + + try { + srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); + destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + + LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); 
+ qemu.convert(srcFile, destFile, true); + LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); + } catch (QemuImgException | LibvirtException e) { + try { + Map srcInfo = qemu.info(srcFile); + LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo)); + } catch (Exception ignored) { + LOGGER.warn("Unable to get info from source disk: " + disk.getName()); + } + + String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage())); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } + + @Override + public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, + String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, + KVMStoragePool destPool, int timeout, byte[] passphrase) { + throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplate'"); + } + + @Override + public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, + String name, PhysicalDiskFormat format, long size, + KVMStoragePool destPool, int timeout, byte[] passphrase) { + throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplateBacking'"); + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, + PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { + throw new UnsupportedOperationException("Unimplemented method 'createPhysicalDisk'"); + } + + boolean isTemplateExtractable(String templatePath) { + ScriptResult result = runScript("file", 5000L, templatePath, "| awk -F' ' '{print $2}'"); + String type = result.getResult(); + return type.equalsIgnoreCase("bzip2") || type.equalsIgnoreCase("gzip") || type.equalsIgnoreCase("zip"); + } + + String getExtractCommandForDownloadedFile(String 
downloadedTemplateFile, String templateFile) { + if (downloadedTemplateFile.endsWith(".zip")) { + return "unzip -p " + downloadedTemplateFile + " | cat > " + templateFile; + } else if (downloadedTemplateFile.endsWith(".bz2")) { + return "bunzip2 -c " + downloadedTemplateFile + " > " + templateFile; + } else if (downloadedTemplateFile.endsWith(".gz")) { + return "gunzip -c " + downloadedTemplateFile + " > " + templateFile; + } else { + throw new CloudRuntimeException("Unable to extract template " + downloadedTemplateFile); + } + } + + private static final ScriptResult runScript(String script, long timeout, String...args) { + ScriptResult result = new ScriptResult(); + Script cmd = new Script(script, Duration.millis(timeout), LOGGER); + cmd.add(args); + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String output = cmd.execute(parser); + // its possible the process never launches which causes an NPE on getExitValue below + if (output != null && output.contains("Unable to execute the command")) { + result.setResult(output); + result.setExitCode(-1); + return result; + } + result.setResult(output); + result.setExitCode(cmd.getExitValue()); + return result; + } + + boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) { + LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs"); + long scriptTimeoutSecs = 30; // how long to wait for each script execution to run + long maxTries = 10; // how many max retries to attempt the script + long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait + int timeBetweenTries = 1000; // how long to sleep between tries + // wait at least 60 seconds even if input was lower + if (waitTimeInSec < 60) { + waitTimeInSec = 60; + } + KVMPhysicalDisk physicalDisk = null; + + // Rescan before checking for the physical disk + int tries = 0; + 
while (waitTimeInMillis > 0 && tries < maxTries) { + tries++; + long start = System.currentTimeMillis(); + String lun; + if (address.getConnectionId() == null) { + lun = "-"; + } else { + lun = address.getConnectionId(); + } + + Process p = null; + try { + ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress()); + p = builder.start(); + if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) { + int rc = p.exitValue(); + StringBuffer output = new StringBuffer(); + if (rc == 0) { + BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream())); + String line = null; + while ((line = input.readLine()) != null) { + output.append(line); + output.append(" "); + } + + physicalDisk = getPhysicalDisk(address, pool); + if (physicalDisk != null && physicalDisk.getSize() > 0) { + LOGGER.debug("Found the volume using id: " + address.getPath() + " of the storage pool: " + pool.getUuid()); + return true; + } + + break; + } else { + LOGGER.warn("Failure discovering LUN via " + connectScript); + BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream())); + String line = null; + while ((line = error.readLine()) != null) { + LOGGER.warn("error --> " + line); + } + } + } else { + LOGGER.debug("Timeout waiting for " + connectScript + " to complete - try " + tries); + } + } catch (IOException | InterruptedException | IllegalThreadStateException e) { + LOGGER.warn("Problem performing scan on SCSI hosts - try " + tries, e); + } finally { + if (p != null && p.isAlive()) { + p.destroyForcibly(); + } + } + + long elapsed = System.currentTimeMillis() - start; + waitTimeInMillis = waitTimeInMillis - elapsed; + + try { + Thread.sleep(timeBetweenTries); + } catch (Exception ex) { + // don't do anything + } + } + + LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid()); + return false; + } + + void runConnectScript(String lun, AddressInfo address) { + try { + 
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress()); + Process p = builder.start(); + int rc = p.waitFor(); + StringBuffer output = new StringBuffer(); + if (rc == 0) { + BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream())); + String line = null; + while ((line = input.readLine()) != null) { + output.append(line); + output.append(" "); + } + } else { + LOGGER.warn("Failure discovering LUN via " + connectScript); + BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream())); + String line = null; + while ((line = error.readLine()) != null) { + LOGGER.warn("error --> " + line); + } + } + } catch (IOException | InterruptedException e) { + throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e); + } + } + + void sleep(long sleepTimeMs) { + try { + Thread.sleep(sleepTimeMs); + } catch (Exception ex) { + // don't do anything + } + } + + long getPhysicalDiskSize(String diskPath) { + if (StringUtils.isEmpty(diskPath)) { + return 0; + } + + Script diskCmd = new Script("blockdev", LOGGER); + diskCmd.add("--getsize64", diskPath); + + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String result = diskCmd.execute(parser); + + if (result != null) { + LOGGER.debug("Unable to get the disk size at path: " + diskPath); + return 0; + } + + Long size = Long.parseLong(parser.getLine()); + + if (size <= 0) { + // its possible the path can't be seen on the host yet, lets rescan + // now rerun the command + parser = new OutputInterpreter.OneLineParser(); + result = diskCmd.execute(parser); + + if (result != null) { + LOGGER.debug("Unable to get the disk size at path: " + diskPath); + return 0; + } + + size = Long.parseLong(parser.getLine()); + } + + return size; + } + + public void resize(String path, String vmName, long newSize) { + if (LOGGER.isDebugEnabled()) LOGGER.debug("Executing resize of " + path + " to " + newSize + " bytes for VM " + 
vmName); + + // extract wwid + AddressInfo address = parseAndValidatePath(path); + if (address == null || address.getAddress() == null) { + LOGGER.error("Unable to resize volume, address value is not valid"); + throw new CloudRuntimeException("Unable to resize volume, address value is not valid"); + } + + if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("Running %s %s %s %s", resizeScript, address.getAddress(), vmName, newSize)); + + // call resizeVolume.sh + ScriptResult result = runScript(resizeScript, 60000L, address.getAddress(), vmName, ""+newSize); + + if (result.getExitCode() != 0) { + throw new CloudRuntimeException("Failed to resize volume at address " + address.getAddress() + " to " + newSize + " bytes for VM " + vmName + ": " + result.getResult()); + } + + LOGGER.info("Resize of volume at address " + address.getAddress() + " completed successfully: " + result.getResult()); + } + + static void cleanupStaleMaps() { + synchronized(CLEANUP_LOCK) { + long start = System.currentTimeMillis(); + ScriptResult result = runScript(cleanupScript, cleanupTimeoutSecs * 1000); + LOGGER.debug("Multipath Cleanup Job elapsed time (ms): "+ (System.currentTimeMillis() - start) + "; result: " + result.getExitCode(), null); + } + } + + public static final class AddressInfo { + String type; + String address; + String connectionId; + String path; + + public AddressInfo(String type, String address, String connectionId, String path) { + this.type = type; + this.address = address; + this.connectionId = connectionId; + this.path = path; + } + + public String getType() { + return type; + } + + public String getAddress() { + return address; + } + + public String getConnectionId() { + return connectionId; + } + + public String getPath() { + return path; + } + + public String toString() { + return String.format("type=%s; address=%s; connid=%s", getType(), getAddress(), getConnectionId()); + } + } + + public static class Property { + private String name; + private T defaultValue; 
+ + Property(String name, T value) { + this.name = name; + this.defaultValue = value; + } + + public String getName() { + return this.name; + } + + public T getDefaultValue() { + return this.defaultValue; + } + + public T getFinalValue() { + File agentPropertiesFile = PropertiesUtil.findConfigFile("agent.properties"); + if (agentPropertiesFile == null) { + LOGGER.debug(String.format("File [%s] was not found, we will use default defined values. Property [%s]: [%s].", "agent.properties", name, defaultValue)); + return defaultValue; + } else { + try { + String configValue = PropertiesUtil.loadFromFile(agentPropertiesFile).getProperty(name); + if (StringUtils.isBlank(configValue)) { + LOGGER.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue)); + return defaultValue; + } else { + if (defaultValue instanceof Integer) { + return (T)Integer.getInteger(configValue); + } else if (defaultValue instanceof Long) { + return (T)Long.getLong(configValue); + } else if (defaultValue instanceof String) { + return (T)configValue; + } else if (defaultValue instanceof Boolean) { + return (T)Boolean.valueOf(configValue); + } else { + return null; + } + } + } catch (IOException var5) { + LOGGER.debug(String.format("Failed to get property [%s]. 
Using default value [%s].", name, defaultValue), var5); + return defaultValue; + } + } + } + } + + public static class ScriptResult { + private int exitCode = -1; + private String result = null; + public int getExitCode() { + return exitCode; + } + public void setExitCode(int exitCode) { + this.exitCode = exitCode; + } + public String getResult() { + return result; + } + public void setResult(String result) { + this.result = result; + } + } + +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIPool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIPool.java new file mode 100644 index 00000000000..bc2f072f719 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIPool.java @@ -0,0 +1,241 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.kvm.storage; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.joda.time.Duration; + +import com.cloud.agent.api.to.HostTO; +import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool; +import com.cloud.storage.Storage; +import com.cloud.storage.Storage.ProvisioningType; + +public class MultipathSCSIPool implements KVMStoragePool { + private String uuid; + private String sourceHost; + private int sourcePort; + private String sourceDir; + private Storage.StoragePoolType storagePoolType; + private StorageAdaptor storageAdaptor; + private long capacity; + private long used; + private long available; + private Map details; + + public MultipathSCSIPool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, Map poolDetails, StorageAdaptor adaptor) { + this.uuid = uuid; + sourceHost = host; + sourcePort = port; + sourceDir = path; + storagePoolType = poolType; + storageAdaptor = adaptor; + capacity = 0; + used = 0; + available = 0; + details = poolDetails; + } + + public MultipathSCSIPool(String uuid, StorageAdaptor adapter) { + this.uuid = uuid; + sourceHost = null; + sourcePort = -1; + sourceDir = null; + storagePoolType = Storage.StoragePoolType.FiberChannel; + details = new HashMap(); + this.storageAdaptor = adapter; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String arg0, ProvisioningType arg1, long arg2, byte[] arg3) { + return null; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String arg0, PhysicalDiskFormat arg1, ProvisioningType arg2, long arg3, + byte[] arg4) { + return null; + } + + @Override + public boolean connectPhysicalDisk(String volumeUuid, Map details) { + return storageAdaptor.connectPhysicalDisk(volumeUuid, this, details); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String 
volumeId) { + return storageAdaptor.getPhysicalDisk(volumeId, this); + } + + @Override + public boolean disconnectPhysicalDisk(String volumeUuid) { + return storageAdaptor.disconnectPhysicalDisk(volumeUuid, this); + } + + @Override + public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) { + return true; + } + + @Override + public List listPhysicalDisks() { + return null; + } + + @Override + public String getUuid() { + return uuid; + } + + public void setCapacity(long capacity) { + this.capacity = capacity; + } + + @Override + public long getCapacity() { + return this.capacity; + } + + public void setUsed(long used) { + this.used = used; + } + + @Override + public long getUsed() { + return this.used; + } + + public void setAvailable(long available) { + this.available = available; + } + + @Override + public long getAvailable() { + return this.available; + } + + @Override + public boolean refresh() { + return false; + } + + @Override + public boolean isExternalSnapshot() { + return true; + } + + @Override + public String getLocalPath() { + return null; + } + + @Override + public String getSourceHost() { + return this.sourceHost; + } + + @Override + public String getSourceDir() { + return this.sourceDir; + } + + @Override + public int getSourcePort() { + return this.sourcePort; + } + + @Override + public String getAuthUserName() { + return null; + } + + @Override + public String getAuthSecret() { + return null; + } + + @Override + public Storage.StoragePoolType getType() { + return storagePoolType; + } + + @Override + public boolean delete() { + return false; + } + + @Override + public QemuImg.PhysicalDiskFormat getDefaultFormat() { + return QemuImg.PhysicalDiskFormat.RAW; + } + + @Override + public boolean createFolder(String path) { + return false; + } + + @Override + public boolean supportsConfigDriveIso() { + return false; + } + + @Override + public Map getDetails() { + return this.details; + } + + @Override + public boolean 
isPoolSupportHA() { + return false; + } + + @Override + public String getHearthBeatPath() { + return null; + } + + @Override + public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp, + boolean hostValidation) { + return null; + } + + @Override + public String getStorageNodeId() { + return null; + } + + @Override + public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) { + return null; + } + + @Override + public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, + String volumeUUIDListString, String vmActivityCheckPath, long duration) { + return null; + } + + public void resize(String path, String vmName, long newSize) { + ((MultipathSCSIAdapterBase)storageAdaptor).resize(path, vmName, newSize); + } +} diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index fc462adc86d..aac7f7343af 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -6200,4 +6200,99 @@ public class LibvirtComputingResourceTest { Mockito.verify(loggerMock).debug("Skipping the memory balloon stats period setting for the VM (Libvirt Domain) with ID [1] and name [fake-VM-name] because this" + " VM has no memory balloon."); } + + @Test + public void calculateCpuSharesTestMinSpeedNullAndHostCgroupV1ShouldNotConsiderCgroupLimit() { + int cpuCores = 2; + int cpuSpeed = 2000; + int maxCpuShares = 0; + int expectedCpuShares = 4000; + + Mockito.doReturn(cpuCores).when(vmTO).getCpus(); + Mockito.doReturn(null).when(vmTO).getMinSpeed(); + Mockito.doReturn(cpuSpeed).when(vmTO).getSpeed(); + 
Mockito.doReturn(maxCpuShares).when(libvirtComputingResourceSpy).getHostCpuMaxCapacity(); + int calculatedCpuShares = libvirtComputingResourceSpy.calculateCpuShares(vmTO); + + Assert.assertEquals(expectedCpuShares, calculatedCpuShares); + } + + @Test + public void calculateCpuSharesTestMinSpeedNotNullAndHostCgroupV1ShouldNotConsiderCgroupLimit() { + int cpuCores = 2; + int cpuSpeed = 2000; + int maxCpuShares = 0; + int expectedCpuShares = 4000; + + Mockito.doReturn(cpuCores).when(vmTO).getCpus(); + Mockito.doReturn(cpuSpeed).when(vmTO).getMinSpeed(); + Mockito.doReturn(maxCpuShares).when(libvirtComputingResourceSpy).getHostCpuMaxCapacity(); + int calculatedCpuShares = libvirtComputingResourceSpy.calculateCpuShares(vmTO); + + Assert.assertEquals(expectedCpuShares, calculatedCpuShares); + } + + + @Test + public void calculateCpuSharesTestMinSpeedNullAndHostCgroupV2ShouldConsiderCgroupLimit() { + int cpuCores = 2; + int cpuSpeed = 2000; + int maxCpuShares = 5000; + int expectedCpuShares = 8000; + + Mockito.doReturn(cpuCores).when(vmTO).getCpus(); + Mockito.doReturn(null).when(vmTO).getMinSpeed(); + Mockito.doReturn(cpuSpeed).when(vmTO).getSpeed(); + Mockito.doReturn(maxCpuShares).when(libvirtComputingResourceSpy).getHostCpuMaxCapacity(); + int calculatedCpuShares = libvirtComputingResourceSpy.calculateCpuShares(vmTO); + + Assert.assertEquals(expectedCpuShares, calculatedCpuShares); + } + + @Test + public void calculateCpuSharesTestMinSpeedNotNullAndHostCgroupV2ShouldConsiderCgroupLimit() { + int cpuCores = 2; + int cpuSpeed = 2000; + int maxCpuShares = 5000; + int expectedCpuShares = 8000; + + Mockito.doReturn(cpuCores).when(vmTO).getCpus(); + Mockito.doReturn(cpuSpeed).when(vmTO).getMinSpeed(); + Mockito.doReturn(maxCpuShares).when(libvirtComputingResourceSpy).getHostCpuMaxCapacity(); + int calculatedCpuShares = libvirtComputingResourceSpy.calculateCpuShares(vmTO); + + Assert.assertEquals(expectedCpuShares, calculatedCpuShares); + } + + @Test + public void 
setMaxHostCpuSharesIfCGroupV2TestShouldCalculateMaxCpuCapacityIfHostUtilizesCgroupV2() { + int cpuCores = 2; + long cpuSpeed = 2500L; + int expectedShares = 5000; + + String hostCgroupVersion = LibvirtComputingResource.CGROUP_V2; + try (MockedStatic diff --git a/ui/src/views/tools/ManageInstances.vue b/ui/src/views/tools/ManageInstances.vue index 96eba539638..fc14f684e72 100644 --- a/ui/src/views/tools/ManageInstances.vue +++ b/ui/src/views/tools/ManageInstances.vue @@ -48,126 +48,278 @@
    - - - - - - - VMware - - - KVM - - - - + + + + + + + + VMware + + + KVM + + + + + + + + {{ opt.label }} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {{ $t('label.clusterid') }} + {{ $t('label.zoneid') }} + + + - + - {{ opt.label }} + + + {{ zoneitem.label }} - - - - - - - - - - - - - - - {{ zoneitem.label }} - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + {{ $t('label.import.instance') }} + + + + + + + + +
    + + {{ $t('label.fetch.instances') }} + +
    - + + + +