Merge branch 'main' into nsx-integration

This commit is contained in:
nvazquez 2023-12-14 09:37:14 -03:00
commit 7814829a48
No known key found for this signature in database
GPG Key ID: 656E1BCC8CB54F84
196 changed files with 15392 additions and 765 deletions

View File

@ -41,6 +41,7 @@ github:
features:
wiki: true
issues: true
discussions: true
projects: true
enabled_merge_buttons:
@ -49,15 +50,21 @@ github:
rebase: false
collaborators:
- acs-robot
- kiranchavala
- rajujith
- alexandremattioli
- vishesh92
- GaOrtiga
- BryanMLima
- SadiJr
- JoaoJandre
- winterhazel
protected_branches: ~
notifications:
commits: commits@cloudstack.apache.org
issues: commits@cloudstack.apache.org
pullrequests: commits@cloudstack.apache.org
discussions: users@cloudstack.apache.org

View File

@ -14,6 +14,8 @@
*/
package com.cloud.agent.properties;
import org.apache.cloudstack.utils.security.KeyStoreUtils;
/**
* Class with the constants of the agent properties that can be configured in
* "agent.properties".
@ -779,6 +781,13 @@ public class AgentProperties{
*/
public static final Property<Long> KVM_HEARTBEAT_CHECKER_TIMEOUT = new Property<>("kvm.heartbeat.checker.timeout", 360000L);
/**
* Keystore passphrase.<br>
* Data type: String.<br>
* Default value: <code>null</code>
*/
public static final Property<String> KEYSTORE_PASSPHRASE = new Property<>(KeyStoreUtils.KS_PASSPHRASE_PROPERTY, null, String.class);
public static class Property <T>{
private String name;
private T defaultValue;
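A minimal sketch of reading the new property on the agent side (assuming the existing AgentPropertiesFileHandler helper; usage hedged):

    import com.cloud.agent.properties.AgentProperties;
    import com.cloud.agent.properties.AgentPropertiesFileHandler;

    // Returns the value of the keystore passphrase key defined by
    // KeyStoreUtils.KS_PASSPHRASE_PROPERTY from agent.properties,
    // or the property's default (null) when the key is not set.
    String passphrase = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KEYSTORE_PASSPHRASE);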

View File

@ -320,6 +320,7 @@ public class EventTypes {
public static final String EVENT_DOMAIN_CREATE = "DOMAIN.CREATE";
public static final String EVENT_DOMAIN_DELETE = "DOMAIN.DELETE";
public static final String EVENT_DOMAIN_UPDATE = "DOMAIN.UPDATE";
public static final String EVENT_DOMAIN_MOVE = "DOMAIN.MOVE";
// Snapshots
public static final String EVENT_SNAPSHOT_COPY = "SNAPSHOT.COPY";
@ -878,6 +879,7 @@ public class EventTypes {
entityEventDetails.put(EVENT_DOMAIN_CREATE, Domain.class);
entityEventDetails.put(EVENT_DOMAIN_DELETE, Domain.class);
entityEventDetails.put(EVENT_DOMAIN_UPDATE, Domain.class);
entityEventDetails.put(EVENT_DOMAIN_MOVE, Domain.class);
// Snapshots
entityEventDetails.put(EVENT_SNAPSHOT_CREATE, Snapshot.class);

View File

@ -77,13 +77,18 @@ public class Storage {
}
public static enum Capability {
HARDWARE_ACCELERATION("HARDWARE_ACCELERATION");
HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"),
ALLOW_MIGRATE_OTHER_POOLS("ALLOW_MIGRATE_OTHER_POOLS");
private final String capability;
private Capability(String capability) {
this.capability = capability;
}
public String toString() {
return this.capability;
}
}
public static enum ProvisioningType {
@ -150,7 +155,8 @@ public class Storage {
ManagedNFS(true, false, false),
Linstor(true, true, false),
DatastoreCluster(true, true, false), // for VMware, to abstract pool of clusters
StorPool(true, true, true);
StorPool(true, true, true),
FiberChannel(true, true, false); // Fibre Channel pool for KVM hypervisors; the volume is located by its WWN value (/dev/disk/by-id/wwn-<wwnvalue>)
private final boolean shared;
private final boolean overprovisioning;
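A tiny sketch of the device-path lookup the comment describes (helper name hypothetical):

    // Builds the udev-managed path for a Fibre Channel volume identified by WWN,
    // e.g. wwnDevicePath("0x600a0b80...") -> "/dev/disk/by-id/wwn-0x600a0b80..."
    static String wwnDevicePath(String wwn) {
        return "/dev/disk/by-id/wwn-" + wwn;
    }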

View File

@ -20,9 +20,11 @@ import java.util.List;
import org.apache.cloudstack.api.command.admin.domain.ListDomainChildrenCmd;
import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmd;
import org.apache.cloudstack.api.command.admin.domain.MoveDomainCmd;
import com.cloud.domain.Domain;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.utils.Pair;
public interface DomainService {
@ -66,4 +68,5 @@ public interface DomainService {
*/
Domain findDomainByIdOrPath(Long id, String domainPath);
Domain moveDomainAndChildrenToNewParentDomain(MoveDomainCmd cmd) throws ResourceAllocationException;
}

View File

@ -16,6 +16,7 @@
// under the License.
package com.cloud.vm;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@ -518,7 +519,8 @@ public interface UserVmService {
UserVm importVM(final DataCenter zone, final Host host, final VirtualMachineTemplate template, final String instanceName, final String displayName, final Account owner, final String userData, final Account caller, final Boolean isDisplayVm, final String keyboard,
final long accountId, final long userId, final ServiceOffering serviceOffering, final String sshPublicKey,
final String hostName, final HypervisorType hypervisorType, final Map<String, String> customParameters, final VirtualMachine.PowerState powerState) throws InsufficientCapacityException;
final String hostName, final HypervisorType hypervisorType, final Map<String, String> customParameters,
final VirtualMachine.PowerState powerState, final LinkedHashMap<String, List<NicProfile>> networkNicMap) throws InsufficientCapacityException;
/**
* Unmanage a guest VM from CloudStack

View File

@ -39,6 +39,7 @@ public interface VmDetailConstants {
// KVM specific (internal)
String KVM_VNC_PORT = "kvm.vnc.port";
String KVM_VNC_ADDRESS = "kvm.vnc.address";
String KVM_VNC_PASSWORD = "kvm.vnc.password";
// KVM specific, custom virtual GPU hardware
String VIDEO_HARDWARE = "video.hardware";

View File

@ -212,6 +212,7 @@ public class ApiConstants {
public static final String HOST_IDS = "hostids";
public static final String HOST_IP = "hostip";
public static final String HOST_NAME = "hostname";
public static final String HOST = "host";
public static final String HOST_CONTROL_STATE = "hostcontrolstate";
public static final String HOSTS_MAP = "hostsmap";
public static final String HYPERVISOR = "hypervisor";
@ -1079,7 +1080,9 @@ public class ApiConstants {
public static final String SOURCE_NAT_IP_ID = "sourcenatipaddressid";
public static final String HAS_RULES = "hasrules";
public static final String NSX_DETAIL_KEY = "forNsx";
public static final String DISK_PATH = "diskpath";
public static final String IMPORT_SOURCE = "importsource";
public static final String TEMP_PATH = "temppath";
public static final String OBJECT_STORAGE = "objectstore";
public static final String HEURISTIC_RULE = "heuristicrule";
public static final String HEURISTIC_TYPE_VALID_OPTIONS = "Valid options are: ISO, SNAPSHOT, TEMPLATE and VOLUME.";

View File

@ -0,0 +1,73 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.domain;
import com.cloud.domain.Domain;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.user.Account;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.DomainResponse;
@APICommand(name = "moveDomain", description = "Moves a domain and its children to a new parent domain.", since = "4.19.0.0", responseObject = DomainResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin})
public class MoveDomainCmd extends BaseCmd {
private static final String APINAME = "moveDomain";
@Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, required = true, entityType = DomainResponse.class, description = "The ID of the domain to be moved.")
private Long domainId;
@Parameter(name = ApiConstants.PARENT_DOMAIN_ID, type = CommandType.UUID, required = true, entityType = DomainResponse.class,
description = "The ID of the new parent domain of the domain to be moved.")
private Long parentDomainId;
public Long getDomainId() {
return domainId;
}
public Long getParentDomainId() {
return parentDomainId;
}
@Override
public String getCommandName() {
return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
}
@Override
public long getEntityOwnerId() {
return Account.ACCOUNT_ID_SYSTEM;
}
@Override
public void execute() throws ResourceAllocationException {
Domain domain = _domainService.moveDomainAndChildrenToNewParentDomain(this);
if (domain != null) {
DomainResponse response = _responseGenerator.createDomainResponse(domain);
response.setResponseName(getCommandName());
this.setResponseObject(response);
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to move the domain.");
}
}
}
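Invocation sketch for the new API (CloudMonkey's usual verb splitting assumed; UUIDs hypothetical):

    move domain domainid=<domain-uuid> parentdomainid=<new-parent-uuid>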

View File

@ -17,6 +17,7 @@
package org.apache.cloudstack.api.command.admin.storage;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.log4j.Logger;
@ -32,6 +33,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse;
import com.cloud.storage.StoragePool;
import com.cloud.user.Account;
@SuppressWarnings("rawtypes")
@APICommand(name = "updateStoragePool", description = "Updates a storage pool.", responseObject = StoragePoolResponse.class, since = "3.0.0",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class UpdateStoragePoolCmd extends BaseCmd {
@ -61,6 +63,20 @@ public class UpdateStoragePoolCmd extends BaseCmd {
" enable it back.")
private Boolean enabled;
@Parameter(name = ApiConstants.DETAILS,
type = CommandType.MAP,
required = false,
description = "the details for the storage pool",
since = "4.19.0")
private Map details;
@Parameter(name = ApiConstants.URL,
type = CommandType.STRING,
required = false,
description = "the URL of the storage pool",
since = "4.19.0")
private String url;
@Parameter(name = ApiConstants.IS_TAG_A_RULE, type = CommandType.BOOLEAN, description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE)
private Boolean isTagARule;
@ -115,6 +131,22 @@ public class UpdateStoragePoolCmd extends BaseCmd {
return ApiCommandResourceType.StoragePool;
}
public Map<String,String> getDetails() {
return details;
}
public void setDetails(Map<String,String> details) {
this.details = details;
}
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
@Override
public void execute() {
StoragePool result = _storageService.updateStoragePool(this);
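Invocation sketch for the new 4.19.0 parameters (CloudMonkey; values hypothetical, and the indexed map syntax for the details parameter is an assumption, as map parsing varies by command):

    update storagepool id=<pool-uuid> url=<new-url-for-the-pool>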

View File

@ -84,7 +84,7 @@ public class ImportUnmanagedInstanceCmd extends BaseAsyncCmd {
@Parameter(name = ApiConstants.NAME,
type = CommandType.STRING,
required = true,
description = "the hypervisor name of the instance")
description = "the name of the instance as it is known to the hypervisor")
private String name;
@Parameter(name = ApiConstants.DISPLAY_NAME,

View File

@ -31,13 +31,18 @@ import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.NetworkResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.api.response.VmwareDatacenterResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.vm.VmImportService;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import javax.inject.Inject;
@APICommand(name = "importVm",
description = "Import virtual machine from a unmanaged host into CloudStack",
responseObject = UserVmResponse.class,
@ -47,21 +52,72 @@ import org.apache.log4j.Logger;
authorized = {RoleType.Admin},
since = "4.19.0")
public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
public static final Logger LOGGER = Logger.getLogger(ImportVmCmd.class);
@Inject
public VmImportService vmImportService;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.UUID,
entityType = ZoneResponse.class,
required = true,
description = "the zone ID")
private Long zoneId;
@Parameter(name = ApiConstants.USERNAME,
type = CommandType.STRING,
description = "the username for the host")
private String username;
@Parameter(name = ApiConstants.PASSWORD,
type = CommandType.STRING,
description = "the password for the host")
private String password;
@Parameter(name = ApiConstants.HOST,
type = CommandType.STRING,
description = "the host name or IP address")
private String host;
@Parameter(name = ApiConstants.HYPERVISOR,
type = CommandType.STRING,
required = true,
description = "hypervisor type of the host")
private String hypervisor;
@Parameter(name = ApiConstants.DISK_PATH,
type = CommandType.STRING,
description = "path of the disk image")
private String diskPath;
@Parameter(name = ApiConstants.IMPORT_SOURCE,
type = CommandType.STRING,
required = true,
description = "Source location for Import" )
private String importSource;
@Parameter(name = ApiConstants.NETWORK_ID,
type = CommandType.UUID,
entityType = NetworkResponse.class,
description = "the network ID")
private Long networkId;
@Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "Host where local disk is located")
private Long hostId;
@Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "Shared storage pool where disk is located")
private Long storagePoolId;
@Parameter(name = ApiConstants.TEMP_PATH,
type = CommandType.STRING,
description = "Temp Path on external host for disk image copy" )
private String tmpPath;
// Import from Vmware to KVM migration parameters
@Parameter(name = ApiConstants.EXISTING_VCENTER_ID,
@ -73,7 +129,7 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
@Parameter(name = ApiConstants.HOST_IP,
type = BaseCmd.CommandType.STRING,
description = "(only for importing migrated VMs from Vmware to KVM) VMware ESXi host IP/Name.")
private String host;
private String hostip;
@Parameter(name = ApiConstants.VCENTER,
type = CommandType.STRING,
@ -88,14 +144,6 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
description = "(only for importing migrated VMs from Vmware to KVM) Name of VMware cluster.")
private String clusterName;
@Parameter(name = ApiConstants.USERNAME, type = CommandType.STRING,
description = "(only for importing migrated VMs from Vmware to KVM) The Username required to connect to resource.")
private String username;
@Parameter(name = ApiConstants.PASSWORD, type = CommandType.STRING,
description = "(only for importing migrated VMs from Vmware to KVM) The password for the specified username.")
private String password;
@Parameter(name = ApiConstants.CONVERT_INSTANCE_HOST_ID, type = CommandType.UUID, entityType = HostResponse.class,
description = "(only for importing migrated VMs from Vmware to KVM) optional - the host to perform the virt-v2v migration from VMware to KVM.")
private Long convertInstanceHostId;
@ -104,30 +152,20 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
description = "(only for importing migrated VMs from Vmware to KVM) optional - the temporary storage pool to perform the virt-v2v migration from VMware to KVM.")
private Long convertStoragePoolId;
@Override
public String getEventType() {
return EventTypes.EVENT_VM_IMPORT;
}
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@Override
public String getEventDescription() {
String vmName = getName();
if (ObjectUtils.anyNotNull(vcenter, existingVcenterId)) {
String msg = StringUtils.isNotBlank(vcenter) ?
String.format("external vCenter: %s - datacenter: %s", vcenter, datacenterName) :
String.format("existing vCenter Datacenter with ID: %s", existingVcenterId);
return String.format("Importing unmanaged VM: %s from %s - VM: %s", getDisplayName(), msg, vmName);
public Long getZoneId() {
return zoneId;
}
return String.format("Importing unmanaged VM: %s", vmName);
}
public Long getExistingVcenterId() {
return existingVcenterId;
}
public String getHost() {
return host;
public String getHostIp() {
return hostip;
}
public String getVcenter() {
@ -150,6 +188,10 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
return password;
}
public String getHost() {
return host;
}
public Long getConvertInstanceHostId() {
return convertInstanceHostId;
}
@ -162,10 +204,47 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
return hypervisor;
}
public String getDiskPath() {
return diskPath;
}
public String getImportSource() {
return importSource;
}
public Long getHostId() {
return hostId;
}
public Long getStoragePoolId() {
return storagePoolId;
}
public String getTmpPath() {
return tmpPath;
}
public Long getNetworkId() {
return networkId;
}
@Override
public String getEventType() {
return EventTypes.EVENT_VM_IMPORT;
}
@Override
public String getEventDescription() {
String vmName = getName();
if (ObjectUtils.anyNotNull(vcenter, existingVcenterId)) {
String msg = StringUtils.isNotBlank(vcenter) ?
String.format("external vCenter: %s - datacenter: %s", vcenter, datacenterName) :
String.format("existing vCenter Datacenter with ID: %s", existingVcenterId);
return String.format("Importing unmanaged VM: %s from %s - VM: %s", getDisplayName(), msg, vmName);
}
return String.format("Importing unmanaged VM: %s", vmName);
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@ -176,5 +255,4 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
response.setResponseName(getCommandName());
setResponseObject(response);
}
}
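Invocation sketch for importing from a remote KVM host (CloudMonkey; values hypothetical, and the accepted importsource values are an assumption):

    import vm name=<vm-name-on-host> zoneid=<zone-uuid> hypervisor=KVM importsource=external host=<host-ip> username=root password=<secret> networkid=<network-uuid>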

View File

@ -0,0 +1,134 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.vm;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.user.Account;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.UnmanagedInstanceResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.apache.cloudstack.vm.VmImportService;
import org.apache.log4j.Logger;
import javax.inject.Inject;
@APICommand(name = "listVmsForImport",
description = "Lists virtual machines on a unmanaged host",
responseObject = UnmanagedInstanceResponse.class,
responseView = ResponseObject.ResponseView.Full,
entityType = {UnmanagedInstanceTO.class},
requestHasSensitiveInfo = false,
responseHasSensitiveInfo = true,
authorized = {RoleType.Admin},
since = "4.19.0")
public class ListVmsForImportCmd extends BaseListCmd {
public static final Logger LOGGER = Logger.getLogger(ListVmsForImportCmd.class.getName());
@Inject
public VmImportService vmImportService;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.UUID,
entityType = ZoneResponse.class,
required = true,
description = "the zone ID")
private Long zoneId;
@Parameter(name = ApiConstants.USERNAME,
type = CommandType.STRING,
description = "the username for the host")
private String username;
@Parameter(name = ApiConstants.PASSWORD,
type = CommandType.STRING,
description = "the password for the host")
private String password;
@Parameter(name = ApiConstants.HOST,
type = CommandType.STRING,
required = true,
description = "the host name or IP address")
private String host;
@Parameter(name = ApiConstants.HYPERVISOR,
type = CommandType.STRING,
required = true,
description = "hypervisor type of the host")
private String hypervisor;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public Long getZoneId() {
return zoneId;
}
public String getUsername() {
return username;
}
public String getPassword() {
return password;
}
public String getHost() {
return host;
}
public String getHypervisor() {
return hypervisor;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
ListResponse<UnmanagedInstanceResponse> response = vmImportService.listVmsForImport(this);
response.setResponseName(getCommandName());
setResponseObject(response);
}
@Override
public long getEntityOwnerId() {
Account account = CallContext.current().getCallingAccount();
if (account != null) {
return account.getId();
}
return Account.ACCOUNT_ID_SYSTEM;
}
}
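Invocation sketch (CloudMonkey; values hypothetical):

    list vmsforimport zoneid=<zone-uuid> host=<host-ip> hypervisor=KVM username=root password=<secret>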

View File

@ -97,52 +97,44 @@ public class ListLoadBalancerRuleInstancesCmd extends BaseListCmd implements Use
public void execute() {
Pair<List<? extends UserVm>, List<String>> vmServiceMap = _lbService.listLoadBalancerInstances(this);
List<? extends UserVm> result = vmServiceMap.first();
s_logger.debug(String.format("A total of [%s] user VMs were obtained when listing the load balancer instances: [%s].", result.size(), result));
List<String> serviceStates = vmServiceMap.second();
s_logger.debug(String.format("A total of [%s] service states were obtained when listing the load balancer instances: [%s].", serviceStates.size(), serviceStates));
if (!isListLbVmip()) {
// list lb instances
ListResponse<UserVmResponse> response = new ListResponse<UserVmResponse>();
List<UserVmResponse> vmResponses = new ArrayList<UserVmResponse>();
if (result != null) {
vmResponses = _responseGenerator.createUserVmResponse(ResponseView.Restricted, "loadbalancerruleinstance", result.toArray(new UserVm[result.size()]));
ListResponse<UserVmResponse> response = new ListResponse<>();
List<UserVmResponse> vmResponses = _responseGenerator.createUserVmResponse(ResponseView.Restricted, "loadbalancerruleinstance", result.toArray(new UserVm[0]));
for (int i = 0; i < result.size(); i++) {
vmResponses.get(i).setServiceState(serviceStates.get(i));
}
}
response.setResponses(vmResponses);
response.setResponseName(getCommandName());
setResponseObject(response);
return;
}
ListResponse<LoadBalancerRuleVmMapResponse> lbRes = new ListResponse<>();
} else {
ListResponse<LoadBalancerRuleVmMapResponse> lbRes = new ListResponse<LoadBalancerRuleVmMapResponse>();
List<UserVmResponse> vmResponses = new ArrayList<UserVmResponse>();
List<LoadBalancerRuleVmMapResponse> listlbVmRes = new ArrayList<LoadBalancerRuleVmMapResponse>();
if (result != null) {
vmResponses = _responseGenerator.createUserVmResponse(getResponseView(), "loadbalancerruleinstance", result.toArray(new UserVm[result.size()]));
List<String> ipaddr = null;
List<UserVmResponse> vmResponses = _responseGenerator.createUserVmResponse(getResponseView(), "loadbalancerruleinstance", result.toArray(new UserVm[0]));
List<LoadBalancerRuleVmMapResponse> lbRuleVmMapList = new ArrayList<>();
for (int i=0; i<result.size(); i++) {
LoadBalancerRuleVmMapResponse lbRuleVmIpResponse = new LoadBalancerRuleVmMapResponse();
vmResponses.get(i).setServiceState(serviceStates.get(i));
lbRuleVmIpResponse.setUserVmResponse(vmResponses.get(i));
//get vm id from the uuid
VirtualMachine lbvm = _entityMgr.findByUuid(VirtualMachine.class, vmResponses.get(i).getId());
lbRuleVmIpResponse.setIpAddr(_lbService.listLbVmIpAddress(getId(), lbvm.getId()));
UserVmResponse userVmResponse = vmResponses.get(i);
userVmResponse.setServiceState(serviceStates.get(i));
lbRuleVmIpResponse.setUserVmResponse(userVmResponse);
VirtualMachine lbVm = _entityMgr.findByUuid(VirtualMachine.class, userVmResponse.getId());
lbRuleVmIpResponse.setIpAddr(_lbService.listLbVmIpAddress(getId(), lbVm.getId()));
lbRuleVmIpResponse.setObjectName("lbrulevmidip");
listlbVmRes.add(lbRuleVmIpResponse);
}
lbRuleVmMapList.add(lbRuleVmIpResponse);
}
lbRes.setResponseName(getCommandName());
lbRes.setResponses(listlbVmRes);
lbRes.setResponses(lbRuleVmMapList);
setResponseObject(lbRes);
}
}
}

View File

@ -166,11 +166,11 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co
private String serviceOfferingName;
@SerializedName(ApiConstants.DISK_OFFERING_ID)
@Param(description = "the ID of the disk offering of the virtual machine", since = "4.4")
@Param(description = "the ID of the disk offering of the virtual machine. This parameter should not be used for retrieving disk offering details of DATA volumes. Use listVolumes API instead", since = "4.4")
private String diskOfferingId;
@SerializedName("diskofferingname")
@Param(description = "the name of the disk offering of the virtual machine", since = "4.4")
@Param(description = "the name of the disk offering of the virtual machine. This parameter should not be used for retrieving disk offering details of DATA volumes. Use listVolumes API instead", since = "4.4")
private String diskOfferingName;
@SerializedName(ApiConstants.BACKUP_OFFERING_ID)

View File

@ -55,6 +55,8 @@ public class UnmanagedInstanceTO {
private List<Nic> nics;
private String vncPassword;
public String getName() {
return name;
}
@ -167,6 +169,14 @@ public class UnmanagedInstanceTO {
this.nics = nics;
}
public String getVncPassword() {
return vncPassword;
}
public void setVncPassword(String vncPassword) {
this.vncPassword = vncPassword;
}
public static class Disk {
private String diskId;
@ -192,6 +202,8 @@ public class UnmanagedInstanceTO {
private String datastorePath;
private int datastorePort;
private String datastoreType;
public String getDiskId() {
@ -297,6 +309,14 @@ public class UnmanagedInstanceTO {
public void setDatastoreType(String datastoreType) {
this.datastoreType = datastoreType;
}
public void setDatastorePort(int datastorePort) {
this.datastorePort = datastorePort;
}
public int getDatastorePort() {
return datastorePort;
}
}
public static class Nic {

View File

@ -17,13 +17,20 @@
package org.apache.cloudstack.vm;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.component.PluggableService;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.KVM;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.VMware;
public interface UnmanagedVMsManager extends VmImportService, UnmanageVMService, PluggableService, Configurable {
ConfigKey<Boolean> UnmanageVMPreserveNic = new ConfigKey<>("Advanced", Boolean.class, "unmanage.vm.preserve.nics", "false",
"If set to true, do not remove VM nics (and its MAC addresses) when unmanaging a VM, leaving them allocated but not reserved. " +
"If set to false, nics are removed and MAC addresses can be reassigned", true, ConfigKey.Scope.Zone);
static boolean isSupported(Hypervisor.HypervisorType hypervisorType) {
return hypervisorType == VMware || hypervisorType == KVM;
}
}
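A minimal sketch of consuming the zone-scoped setting (ConfigKey's scoped accessor; usage hedged):

    // True when NICs (and their MACs) should be kept allocated while unmanaging VMs in this zone.
    Boolean preserveNics = UnmanagedVMsManager.UnmanageVMPreserveNic.valueIn(zoneId);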

View File

@ -20,6 +20,7 @@ package org.apache.cloudstack.vm;
import org.apache.cloudstack.api.command.admin.vm.ImportUnmanagedInstanceCmd;
import org.apache.cloudstack.api.command.admin.vm.ImportVmCmd;
import org.apache.cloudstack.api.command.admin.vm.ListUnmanagedInstancesCmd;
import org.apache.cloudstack.api.command.admin.vm.ListVmsForImportCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.UnmanagedInstanceResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
@ -37,5 +38,8 @@ public interface VmImportService {
ListResponse<UnmanagedInstanceResponse> listUnmanagedInstances(ListUnmanagedInstancesCmd cmd);
UserVmResponse importUnmanagedInstance(ImportUnmanagedInstanceCmd cmd);
UserVmResponse importVm(ImportVmCmd cmd);
ListResponse<UnmanagedInstanceResponse> listVmsForImport(ListVmsForImportCmd cmd);
}

View File

@ -71,6 +71,56 @@
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-agent</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-cluster</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-config</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-db</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-events</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-jobs</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-managed-context</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-security</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-spring-module</artifactId>
@ -81,6 +131,11 @@
<artifactId>cloud-framework-spring-lifecycle</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-solidfire</artifactId>
@ -111,6 +166,16 @@
<artifactId>cloud-plugin-storage-volume-storpool</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-primera</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-flasharray</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-server</artifactId>
@ -597,6 +662,16 @@
<artifactId>cloud-plugin-storage-object-simulator</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-usage</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-utils</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
@ -906,6 +981,7 @@
<exclude>mysql:mysql-connector-java</exclude>
<exclude>org.apache.cloudstack:cloud-plugin-storage-volume-storpool</exclude>
<exclude>org.apache.cloudstack:cloud-plugin-storage-volume-linstor</exclude>
<exclude>org.apache.cloudstack:cloud-usage</exclude>
<exclude>com.linbit.linstor.api:java-linstor</exclude>
</excludes>
</artifactSet>

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.api;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class CheckVolumeAnswer extends Answer {
private long size;
CheckVolumeAnswer() {
}
public CheckVolumeAnswer(CheckVolumeCommand cmd, String details, long size) {
super(cmd, true, details);
this.size = size;
}
public long getSize() {
return size;
}
public String getString() {
return "CheckVolumeAnswer [size=" + size + "]";
}
}

View File

@ -0,0 +1,59 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import com.cloud.agent.api.to.StorageFilerTO;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class CheckVolumeCommand extends Command {
String srcFile;
StorageFilerTO storageFilerTO;
public String getSrcFile() {
return srcFile;
}
public void setSrcFile(String srcFile) {
this.srcFile = srcFile;
}
public CheckVolumeCommand() {
}
@Override
public boolean executeInSequence() {
return false;
}
public String getString() {
return "CheckVolumeCommand [srcFile=" + srcFile + "]";
}
public StorageFilerTO getStorageFilerTO() {
return storageFilerTO;
}
public void setStorageFilerTO(StorageFilerTO storageFilerTO) {
this.storageFilerTO = storageFilerTO;
}
}

View File

@ -0,0 +1,61 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.api;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class CopyRemoteVolumeAnswer extends Answer {
private String remoteIp;
private String filename;
private long size;
CopyRemoteVolumeAnswer() {
}
public CopyRemoteVolumeAnswer(CopyRemoteVolumeCommand cmd, String details, String filename, long size) {
super(cmd, true, details);
this.remoteIp = cmd.getRemoteIp();
this.filename = filename;
this.size = size;
}
public String getRemoteIp() {
return remoteIp;
}
public void setRemoteIp(String remoteIp) {
this.remoteIp = remoteIp;
}
public void setFilename(String filename) {
this.filename = filename;
}
public String getFilename() {
return filename;
}
public long getSize() {
return size;
}
public String getString() {
return "CopyRemoteVolumeAnswer [remoteIp=" + remoteIp + "]";
}
}

View File

@ -0,0 +1,101 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import com.cloud.agent.api.to.StorageFilerTO;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class CopyRemoteVolumeCommand extends Command {
String remoteIp;
String username;
String password;
String srcFile;
String tmpPath;
StorageFilerTO storageFilerTO;
public CopyRemoteVolumeCommand(String remoteIp, String username, String password) {
this.remoteIp = remoteIp;
this.username = username;
this.password = password;
}
public String getRemoteIp() {
return remoteIp;
}
public void setRemoteIp(String remoteIp) {
this.remoteIp = remoteIp;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public String getSrcFile() {
return srcFile;
}
public void setSrcFile(String srcFile) {
this.srcFile = srcFile;
}
public CopyRemoteVolumeCommand() {
}
@Override
public boolean executeInSequence() {
return false;
}
public String getString() {
return "CopyRemoteVolumeCommand [remoteIp=" + remoteIp + "]";
}
public void setTempPath(String tmpPath) {
this.tmpPath = tmpPath;
}
public String getTmpPath() {
return tmpPath;
}
public StorageFilerTO getStorageFilerTO() {
return storageFilerTO;
}
public void setStorageFilerTO(StorageFilerTO storageFilerTO) {
this.storageFilerTO = storageFilerTO;
}
}

View File

@ -0,0 +1,75 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.api;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import java.util.HashMap;
import java.util.List;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class GetRemoteVmsAnswer extends Answer {
private String remoteIp;
private HashMap<String, UnmanagedInstanceTO> unmanagedInstances;
List<String> vmNames;
GetRemoteVmsAnswer() {
}
public GetRemoteVmsAnswer(GetRemoteVmsCommand cmd, String details, HashMap<String, UnmanagedInstanceTO> unmanagedInstances) {
super(cmd, true, details);
this.remoteIp = cmd.getRemoteIp();
this.unmanagedInstances = unmanagedInstances;
}
public GetRemoteVmsAnswer(GetRemoteVmsCommand cmd, String details, List<String> vmNames) {
super(cmd, true, details);
this.remoteIp = cmd.getRemoteIp();
this.vmNames = vmNames;
}
public String getRemoteIp() {
return remoteIp;
}
public void setRemoteIp(String remoteIp) {
this.remoteIp = remoteIp;
}
public HashMap<String, UnmanagedInstanceTO> getUnmanagedInstances() {
return unmanagedInstances;
}
public void setUnmanagedInstances(HashMap<String, UnmanagedInstanceTO> unmanagedInstances) {
this.unmanagedInstances = unmanagedInstances;
}
public List<String> getVmNames() {
return vmNames;
}
public void setVmNames(List<String> vmNames) {
this.vmNames = vmNames;
}
public String getString() {
return "GetRemoteVmsAnswer [remoteIp=" + remoteIp + "]";
}
}

View File

@ -0,0 +1,70 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class GetRemoteVmsCommand extends Command {
String remoteIp;
String username;
String password;
public GetRemoteVmsCommand(String remoteIp, String username, String password) {
this.remoteIp = remoteIp;
this.username = username;
this.password = password;
}
public String getRemoteIp() {
return remoteIp;
}
public void setRemoteIp(String remoteIp) {
this.remoteIp = remoteIp;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public GetRemoteVmsCommand() {
}
@Override
public boolean executeInSequence() {
return false;
}
public String getString() {
return "GetRemoteVmsCommand [remoteIp=" + remoteIp + "]";
}
}

View File

@ -30,6 +30,10 @@ public class GetUnmanagedInstancesAnswer extends Answer {
GetUnmanagedInstancesAnswer() {
}
public GetUnmanagedInstancesAnswer(GetUnmanagedInstancesCommand cmd, String details) {
super(cmd, false, details);
}
public GetUnmanagedInstancesAnswer(GetUnmanagedInstancesCommand cmd, String details, HashMap<String, UnmanagedInstanceTO> unmanagedInstances) {
super(cmd, true, details);
this.instanceName = cmd.getInstanceName();

View File

@ -40,6 +40,9 @@ public class MigrateCommand extends Command {
private boolean executeInSequence = false;
private List<MigrateDiskInfo> migrateDiskInfoList = new ArrayList<>();
private Map<String, DpdkTO> dpdkInterfaceMapping = new HashMap<>();
private int newVmCpuShares;
Map<String, Boolean> vlanToPersistenceMap = new HashMap<>();
public Map<String, DpdkTO> getDpdkInterfaceMapping() {
@ -138,6 +141,14 @@ public class MigrateCommand extends Command {
this.migrateDiskInfoList = migrateDiskInfoList;
}
public int getNewVmCpuShares() {
return newVmCpuShares;
}
public void setNewVmCpuShares(int newVmCpuShares) {
this.newVmCpuShares = newVmCpuShares;
}
public static class MigrateDiskInfo {
public enum DiskType {
FILE, BLOCK;

View File

@ -28,6 +28,8 @@ public class PrepareForMigrationAnswer extends Answer {
private Map<String, DpdkTO> dpdkInterfaceMapping = new HashMap<>();
private Integer newVmCpuShares = null;
protected PrepareForMigrationAnswer() {
}
@ -50,4 +52,12 @@ public class PrepareForMigrationAnswer extends Answer {
public Map<String, DpdkTO> getDpdkInterfaceMapping() {
return this.dpdkInterfaceMapping;
}
public Integer getNewVmCpuShares() {
return newVmCpuShares;
}
public void setNewVmCpuShares(Integer newVmCpuShares) {
this.newVmCpuShares = newVmCpuShares;
}
}

View File

@ -168,6 +168,9 @@ public interface VolumeOrchestrationService {
DiskProfile importVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template,
Account owner, Long deviceId, Long poolId, String path, String chainInfo);
DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachine vm, VirtualMachineTemplate template,
Long deviceId, Long poolId, String path, String chainInfo, DiskProfile diskProfile);
/**
* Unmanage VM volumes
*/

View File

@ -54,6 +54,7 @@ import com.cloud.network.vpc.VpcVO;
import com.cloud.network.vpc.dao.VpcDao;
import com.cloud.user.dao.AccountDao;
import com.cloud.event.ActionEventUtils;
import com.google.gson.Gson;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
@ -2854,23 +2855,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
boolean migrated = false;
Map<String, DpdkTO> dpdkInterfaceMapping = null;
Map<String, DpdkTO> dpdkInterfaceMapping = new HashMap<>();
try {
final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
Map<String, Boolean> vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId());
final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType()));
if (MapUtils.isNotEmpty(vlanToPersistenceMap)) {
mc.setVlanToPersistenceMap(vlanToPersistenceMap);
}
boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
mc.setAutoConvergence(kvmAutoConvergence);
mc.setHostGuid(dest.getHost().getGuid());
dpdkInterfaceMapping = ((PrepareForMigrationAnswer) pfma).getDpdkInterfaceMapping();
if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) {
mc.setDpdkInterfaceMapping(dpdkInterfaceMapping);
}
final MigrateCommand mc = buildMigrateCommand(vm, to, dest, pfma, dpdkInterfaceMapping);
try {
final Answer ma = _agentMgr.send(vm.getLastHostId(), mc);
@ -2942,6 +2929,43 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
}
/**
* Create and set parameters for the {@link MigrateCommand} used in the migration and scaling of VMs.
*/
protected MigrateCommand buildMigrateCommand(VMInstanceVO vmInstance, VirtualMachineTO virtualMachineTO, DeployDestination destination, Answer answer,
Map<String, DpdkTO> dpdkInterfaceMapping) {
final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vmInstance.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
final MigrateCommand migrateCommand = new MigrateCommand(vmInstance.getInstanceName(), destination.getHost().getPrivateIpAddress(), isWindows, virtualMachineTO,
getExecuteInSequence(vmInstance.getHypervisorType()));
Map<String, Boolean> vlanToPersistenceMap = getVlanToPersistenceMapForVM(vmInstance.getId());
if (MapUtils.isNotEmpty(vlanToPersistenceMap)) {
s_logger.debug(String.format("Setting VLAN persistence to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), virtualMachineTO));
migrateCommand.setVlanToPersistenceMap(vlanToPersistenceMap);
}
migrateCommand.setAutoConvergence(StorageManager.KvmAutoConvergence.value());
migrateCommand.setHostGuid(destination.getHost().getGuid());
PrepareForMigrationAnswer prepareForMigrationAnswer = (PrepareForMigrationAnswer) answer;
Map<String, DpdkTO> answerDpdkInterfaceMapping = prepareForMigrationAnswer.getDpdkInterfaceMapping();
if (MapUtils.isNotEmpty(answerDpdkInterfaceMapping) && dpdkInterfaceMapping != null) {
s_logger.debug(String.format("Setting DPDK interface mapping to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap),
virtualMachineTO));
dpdkInterfaceMapping.putAll(answerDpdkInterfaceMapping);
migrateCommand.setDpdkInterfaceMapping(dpdkInterfaceMapping);
}
Integer newVmCpuShares = prepareForMigrationAnswer.getNewVmCpuShares();
if (newVmCpuShares != null) {
s_logger.debug(String.format("Setting CPU shares to [%d] as part of migrate command for VM [%s].", newVmCpuShares, virtualMachineTO));
migrateCommand.setNewVmCpuShares(newVmCpuShares);
}
return migrateCommand;
}
private void updateVmPod(VMInstanceVO vm, long dstHostId) {
// update the VMs pod
HostVO host = _hostDao.findById(dstHostId);
@ -3021,6 +3045,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
* <ul>
* <li> If the current storage pool of the volume is not a managed storage, we do not need to validate anything here.
* <li> If the current storage pool is a managed storage and the target storage pool ID is different from the current one, we throw an exception.
* <li> If the current storage pool is a managed storage and it explicitly declares, via the ALLOW_MIGRATE_OTHER_POOLS capability in its details, that it can migrate volumes to other pools, the migration is allowed.
* </ul>
*/
protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) {
@ -3030,6 +3055,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
if (currentPool.getId() == targetPool.getId()) {
return;
}
Map<String, String> details = _storagePoolDao.getDetails(currentPool.getId());
if (details != null && Boolean.parseBoolean(details.get(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString()))) {
return;
}
throw new CloudRuntimeException(String.format("Currently, a volume on managed storage can only be 'migrated' to itself " + "[volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].",
volume.getUuid(), currentPool.getUuid(), targetPool.getUuid()));
}
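A small sketch of how a managed pool could satisfy the new early return (how the detail entry is persisted is assumed):

    // Hypothetical pool details advertising the capability added above.
    Map<String, String> details = new HashMap<>();
    details.put(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString(), "true");
    // Boolean.parseBoolean(details.get(...)) then yields true and the check passes.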
@ -4459,16 +4489,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
boolean migrated = false;
try {
Map<String, Boolean> vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId());
final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType()));
if (MapUtils.isNotEmpty(vlanToPersistenceMap)) {
mc.setVlanToPersistenceMap(vlanToPersistenceMap);
}
boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
mc.setAutoConvergence(kvmAutoConvergence);
mc.setHostGuid(dest.getHost().getGuid());
final MigrateCommand mc = buildMigrateCommand(vm, to, dest, pfma, null);
try {
final Answer ma = _agentMgr.send(vm.getLastHostId(), mc);

View File

@ -2224,6 +2224,51 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
return toDiskProfile(vol, offering);
}
@Override
public DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachine vm, VirtualMachineTemplate template,
Long deviceId, Long poolId, String path, String chainInfo, DiskProfile diskProfile) {
VolumeVO vol = _volsDao.findById(diskProfile.getVolumeId());
if (vm != null) {
vol.setInstanceId(vm.getId());
}
if (deviceId != null) {
vol.setDeviceId(deviceId);
} else if (type.equals(Type.ROOT)) {
vol.setDeviceId(0L);
} else {
vol.setDeviceId(1L);
}
if (template != null) {
if (ImageFormat.ISO.equals(template.getFormat())) {
vol.setIsoId(template.getId());
} else if (Storage.TemplateType.DATADISK.equals(template.getTemplateType())) {
vol.setTemplateId(template.getId());
}
if (type == Type.ROOT) {
vol.setTemplateId(template.getId());
}
}
// display flag matters only for the User vms
if (VirtualMachine.Type.User.equals(vm.getType())) {
UserVmVO userVm = _userVmDao.findById(vm.getId());
vol.setDisplayVolume(userVm.isDisplayVm());
}
vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType()));
vol.setPoolId(poolId);
vol.setPath(path);
vol.setChainInfo(chainInfo);
vol.setSize(diskProfile.getSize());
vol.setState(Volume.State.Ready);
vol.setAttached(new Date());
_volsDao.update(vol.getId(), vol);
return toDiskProfile(vol, offering);
}
@Override
public void unmanageVolumes(long vmId) {
if (s_logger.isDebugEnabled()) {

View File

@ -17,6 +17,7 @@
package com.cloud.network.dao;
import java.util.List;
import java.util.Map;
import com.cloud.utils.db.GenericDao;
@ -26,4 +27,6 @@ public interface NetworkDomainDao extends GenericDao<NetworkDomainVO, Long> {
NetworkDomainVO getDomainNetworkMapByNetworkId(long networkId);
List<Long> listNetworkIdsByDomain(long domainId);
Map<Long, List<String>> listDomainsOfSharedNetworksUsedByDomainPath(String domainPath);
}

View File

@ -16,10 +16,17 @@
// under the License.
package com.cloud.network.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.cloud.utils.db.TransactionLegacy;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.DB;
@ -31,9 +38,23 @@ import com.cloud.utils.db.SearchCriteria.Op;
@Component
@DB()
public class NetworkDomainDaoImpl extends GenericDaoBase<NetworkDomainVO, Long> implements NetworkDomainDao {
public static Logger logger = Logger.getLogger(NetworkDomainDaoImpl.class.getName());
final SearchBuilder<NetworkDomainVO> AllFieldsSearch;
final SearchBuilder<NetworkDomainVO> DomainsSearch;
private static final String LIST_DOMAINS_OF_SHARED_NETWORKS_USED_BY_DOMAIN_PATH = "SELECT shared_nw.domain_id, \n" +
"GROUP_CONCAT('VM:', vm.uuid, ' | NW:' , network.uuid) \n" +
"FROM cloud.domain_network_ref AS shared_nw\n" +
"INNER JOIN cloud.nics AS nic ON (nic.network_id = shared_nw.network_id AND nic.removed IS NULL)\n" +
"INNER JOIN cloud.vm_instance AS vm ON (vm.id = nic.instance_id)\n" +
"INNER JOIN cloud.domain AS domain ON (domain.id = vm.domain_id)\n" +
"INNER JOIN cloud.domain AS domain_sn ON (domain_sn.id = shared_nw.domain_id)\n" +
"INNER JOIN cloud.networks AS network ON (shared_nw.network_id = network.id)\n" +
"WHERE shared_nw.subdomain_access = 1\n" +
"AND domain.path LIKE ?\n" +
"AND domain_sn.path NOT LIKE ?\n" +
"GROUP BY shared_nw.network_id";
protected NetworkDomainDaoImpl() {
super();
@ -71,4 +92,37 @@ public class NetworkDomainDaoImpl extends GenericDaoBase<NetworkDomainVO, Long>
}
return networkIdsToReturn;
}
@Override
public Map<Long, List<String>> listDomainsOfSharedNetworksUsedByDomainPath(String domainPath) {
logger.debug(String.format("Retrieving the domains of the shared networks with subdomain access used by domain with path [%s].", domainPath));
TransactionLegacy txn = TransactionLegacy.currentTxn();
try (PreparedStatement pstmt = txn.prepareStatement(LIST_DOMAINS_OF_SHARED_NETWORKS_USED_BY_DOMAIN_PATH)) {
Map<Long, List<String>> domainsOfSharedNetworksUsedByDomainPath = new HashMap<>();
String domainSearch = domainPath.concat("%");
pstmt.setString(1, domainSearch);
pstmt.setString(2, domainSearch);
try (ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
Long domainId = rs.getLong(1);
List<String> vmUuidsAndNetworkUuids = Arrays.asList(rs.getString(2).split(","));
domainsOfSharedNetworksUsedByDomainPath.put(domainId, vmUuidsAndNetworkUuids);
}
}
return domainsOfSharedNetworksUsedByDomainPath;
} catch (SQLException e) {
logger.error(String.format("Failed to retrieve the domains of the shared networks with subdomain access used by domain with path [%s] due to [%s]. Returning an empty "
+ "list of domains.", domainPath, e.getMessage()));
logger.debug(String.format("Failed to retrieve the domains of the shared networks with subdomain access used by domain with path [%s]. Returning an empty "
+ "list of domains.", domainPath), e);
return new HashMap<>();
}
}
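// Illustrative shape of the returned map (values hypothetical, not from the source): keys are the ids
// of domains owning shared networks with subdomain access; values are the GROUP_CONCAT output split on
// commas, i.e. "VM:<vm uuid> | NW:<network uuid>" entries, one per VM NIC on the network, e.g.:
//   {42=[VM:0aa1...-uuid | NW:9f2c...-uuid, VM:77d0...-uuid | NW:9f2c...-uuid]}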
}

View File

@ -152,5 +152,7 @@ public interface VolumeDao extends GenericDao<VolumeVO, Long>, StateDao<Volume.S
List<VolumeVO> listByPoolIdAndPaths(long id, List<String> pathList);
VolumeVO findByPoolIdAndPath(long id, String path);
List<VolumeVO> listByIds(List<Long> ids);
}

View File

@ -71,6 +71,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
protected GenericSearchBuilder<VolumeVO, SumCount> primaryStorageSearch;
protected GenericSearchBuilder<VolumeVO, SumCount> primaryStorageSearch2;
protected GenericSearchBuilder<VolumeVO, SumCount> secondaryStorageSearch;
private final SearchBuilder<VolumeVO> poolAndPathSearch;
@Inject
ResourceTagDao _tagsDao;
@ -487,6 +488,11 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
volumeIdSearch.and("idIN", volumeIdSearch.entity().getId(), Op.IN);
volumeIdSearch.done();
poolAndPathSearch = createSearchBuilder();
poolAndPathSearch.and("poolId", poolAndPathSearch.entity().getPoolId(), Op.EQ);
poolAndPathSearch.and("path", poolAndPathSearch.entity().getPath(), Op.EQ);
poolAndPathSearch.done();
}
@Override
@ -802,6 +808,14 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
return listBy(sc);
}
@Override
public VolumeVO findByPoolIdAndPath(long id, String path) {
SearchCriteria<VolumeVO> sc = poolAndPathSearch.create();
sc.setParameters("poolId", id);
sc.setParameters("path", path);
return findOneBy(sc);
}
@Override
public List<VolumeVO> listByIds(List<Long> ids) {
if (CollectionUtils.isEmpty(ids)) {

View File

@ -21,6 +21,7 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
public class DatabaseAccessObject {
@ -85,8 +86,8 @@ public class DatabaseAccessObject {
return columnExists;
}
public String generateIndexName(String tableName, String columnName) {
return String.format("i_%s__%s", tableName, columnName);
public String generateIndexName(String tableName, String... columnName) {
return String.format("i_%s__%s", tableName, StringUtils.join(columnName, "__"));
}
public boolean indexExists(Connection conn, String tableName, String indexName) {
@ -101,8 +102,8 @@ public class DatabaseAccessObject {
return false;
}
public void createIndex(Connection conn, String tableName, String columnName, String indexName) {
String stmt = String.format("CREATE INDEX %s on %s (%s)", indexName, tableName, columnName);
public void createIndex(Connection conn, String tableName, String indexName, String... columnNames) {
String stmt = String.format("CREATE INDEX %s ON %s (%s)", indexName, tableName, StringUtils.join(columnNames, ", "));
s_logger.debug("Statement: " + stmt);
try (PreparedStatement pstmt = conn.prepareStatement(stmt)) {
pstmt.execute();

View File

@ -23,11 +23,11 @@ public class DbUpgradeUtils {
private static DatabaseAccessObject dao = new DatabaseAccessObject();
public static void addIndexIfNeeded(Connection conn, String tableName, String columnName) {
String indexName = dao.generateIndexName(tableName, columnName);
public static void addIndexIfNeeded(Connection conn, String tableName, String... columnNames) {
String indexName = dao.generateIndexName(tableName, columnNames);
if (!dao.indexExists(conn, tableName, indexName)) {
dao.createIndex(conn, tableName, columnName, indexName);
dao.createIndex(conn, tableName, indexName, columnNames);
}
}

View File

@ -76,6 +76,7 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate
public void performDataMigration(Connection conn) {
decryptConfigurationValuesFromAccountAndDomainScopesNotInSecureHiddenCategories(conn);
migrateBackupDates(conn);
addIndexes(conn);
}
@Override
@ -254,4 +255,11 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate
}
}
private void addIndexes(Connection conn) {
DbUpgradeUtils.addIndexIfNeeded(conn, "alert", "archived", "created");
DbUpgradeUtils.addIndexIfNeeded(conn, "alert", "type", "data_center_id", "pod_id");
DbUpgradeUtils.addIndexIfNeeded(conn, "event", "resource_type", "resource_id");
}
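// For reference, given generateIndexName() and createIndex() above, these calls would issue statements
// of the following form when the indexes are missing (names derived from the helpers shown earlier):
//   CREATE INDEX i_alert__archived__created ON alert (archived, created)
//   CREATE INDEX i_alert__type__data_center_id__pod_id ON alert (type, data_center_id, pod_id)
//   CREATE INDEX i_event__resource_type__resource_id ON event (resource_type, resource_id)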
}

View File

@ -17,6 +17,7 @@
package org.apache.cloudstack.affinity.dao;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO;
@ -28,4 +29,6 @@ public interface AffinityGroupDomainMapDao extends GenericDao<AffinityGroupDomai
List<AffinityGroupDomainMapVO> listByDomain(Object... domainId);
Map<Long, List<String>> listDomainsOfAffinityGroupsUsedByDomainPath(String domainPath);
}

View File

@ -16,23 +16,46 @@
// under the License.
package org.apache.cloudstack.affinity.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.PostConstruct;
import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO;
import org.apache.log4j.Logger;
import com.cloud.network.dao.NetworkDomainDaoImpl;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
public class AffinityGroupDomainMapDaoImpl extends GenericDaoBase<AffinityGroupDomainMapVO, Long> implements AffinityGroupDomainMapDao {
public static Logger logger = Logger.getLogger(AffinityGroupDomainMapDaoImpl.class.getName());
private SearchBuilder<AffinityGroupDomainMapVO> ListByAffinityGroup;
private SearchBuilder<AffinityGroupDomainMapVO> DomainsSearch;
private static final String LIST_DOMAINS_WITH_AFFINITY_GROUPS_WITH_SUBDOMAIN_ACCESS_USED_BY_DOMAIN_PATH = "SELECT affinity_group_domain_map.domain_id, \n" +
"GROUP_CONCAT('VM:', vm.uuid, ' | AG:' , affinity_group.uuid) \n" +
"FROM cloud.affinity_group_domain_map AS affinity_group_domain_map\n" +
"INNER JOIN cloud.affinity_group_vm_map AS affinity_group_vm_map ON (cloud.affinity_group_domain_map.affinity_group_id = affinity_group_vm_map.affinity_group_id)\n" +
"INNER JOIN cloud.vm_instance AS vm ON (vm.id = affinity_group_vm_map.instance_id)\n" +
"INNER JOIN cloud.domain AS domain ON (domain.id = vm.domain_id)\n" +
"INNER JOIN cloud.domain AS domain_sn ON (domain_sn.id = affinity_group_domain_map.domain_id)\n" +
"INNER JOIN cloud.affinity_group AS affinity_group ON (affinity_group.id = affinity_group_domain_map.affinity_group_id)\n" +
"WHERE affinity_group_domain_map.subdomain_access = 1\n" +
"AND domain.path LIKE ?\n" +
"AND domain_sn.path NOT LIKE ?\n" +
"GROUP BY affinity_group.id";
public AffinityGroupDomainMapDaoImpl() {
}
@ -62,4 +85,38 @@ public class AffinityGroupDomainMapDaoImpl extends GenericDaoBase<AffinityGroupD
return listBy(sc);
}
@Override
public Map<Long, List<String>> listDomainsOfAffinityGroupsUsedByDomainPath(String domainPath) {
logger.debug(String.format("Retrieving the domains of the affinity groups with subdomain access used by domain with path [%s].", domainPath));
TransactionLegacy txn = TransactionLegacy.currentTxn();
try (PreparedStatement pstmt = txn.prepareStatement(LIST_DOMAINS_WITH_AFFINITY_GROUPS_WITH_SUBDOMAIN_ACCESS_USED_BY_DOMAIN_PATH)) {
Map<Long, List<String>> domainsOfAffinityGroupsUsedByDomainPath = new HashMap<>();
String domainSearch = domainPath.concat("%");
pstmt.setString(1, domainSearch);
pstmt.setString(2, domainSearch);
try (ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
Long domainId = rs.getLong(1);
List<String> vmUuidsAndAffinityGroupUuids = Arrays.asList(rs.getString(2).split(","));
domainsOfAffinityGroupsUsedByDomainPath.put(domainId, vmUuidsAndAffinityGroupUuids);
}
}
return domainsOfAffinityGroupsUsedByDomainPath;
} catch (SQLException e) {
logger.error(String.format("Failed to retrieve the domains of the affinity groups with subdomain access used by domain with path [%s] due to [%s]. Returning an " +
"empty list of domains.", domainPath, e.getMessage()));
logger.debug(String.format("Failed to retrieve the domains of the affinity groups with subdomain access used by domain with path [%s]. Returning an empty "
+ "list of domains.", domainPath), e);
return new HashMap<>();
}
}
}

View File

@ -93,8 +93,8 @@ public class DatabaseAccessObjectTest {
@Test
public void generateIndexNameTest() {
String indexName = dao.generateIndexName("mytable","mycolumn");
Assert.assertEquals( "i_mytable__mycolumn", indexName);
String indexName = dao.generateIndexName("mytable","mycolumn1", "mycolumn2");
Assert.assertEquals( "i_mytable__mycolumn1__mycolumn2", indexName);
}
@Test
@ -136,10 +136,11 @@ public class DatabaseAccessObjectTest {
Connection conn = connectionMock;
String tableName = "mytable";
String columnName = "mycolumn";
String columnName1 = "mycolumn1";
String columnName2 = "mycolumn2";
String indexName = "myindex";
dao.createIndex(conn, tableName, columnName, indexName);
dao.createIndex(conn, tableName, indexName, columnName1, columnName2);
verify(connectionMock, times(1)).prepareStatement(anyString());
verify(preparedStatementMock, times(1)).execute();
verify(preparedStatementMock, times(1)).close();

View File

@ -193,7 +193,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
destData.getType() == DataObjectType.TEMPLATE)) {
// volume transfer from primary to secondary. Volume transfer between primary pools are already handled by copyVolumeBetweenPools
// Delete cache in order to certainly transfer a latest image.
s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId +
if (s_logger.isDebugEnabled()) s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId +
", uuid: " + cacheUuid + ")");
cacheMgr.deleteCacheObject(srcForCopy);
} else {
@ -205,7 +205,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
", uuid: " + cacheUuid + ")");
cacheMgr.deleteCacheObject(srcForCopy);
} else {
s_logger.debug("Decrease reference count of " + cacheType +
if (s_logger.isDebugEnabled()) s_logger.debug("Decrease reference count of " + cacheType +
" cache(id: " + cacheId + ", uuid: " + cacheUuid + ")");
cacheMgr.releaseCacheObject(srcForCopy);
}
@ -213,7 +213,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
return answer;
} catch (Exception e) {
s_logger.debug("copy object failed: ", e);
if (s_logger.isDebugEnabled()) s_logger.debug("copy object failed: ", e);
if (cacheData != null) {
cacheMgr.deleteCacheObject(cacheData);
}
@ -331,7 +331,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
return answer;
} catch (Exception e) {
s_logger.debug("Failed to send to storage pool", e);
if (s_logger.isDebugEnabled()) s_logger.debug("Failed to send to storage pool", e);
throw new CloudRuntimeException("Failed to send to storage pool", e);
}
}
@ -388,7 +388,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
if (answer == null || !answer.getResult()) {
if (answer != null) {
s_logger.debug("copy to image store failed: " + answer.getDetails());
if (s_logger.isDebugEnabled()) s_logger.debug("copy to image store failed: " + answer.getDetails());
}
objOnImageStore.processEvent(Event.OperationFailed);
imageStore.delete(objOnImageStore);
@ -411,7 +411,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
if (answer == null || !answer.getResult()) {
if (answer != null) {
s_logger.debug("copy to primary store failed: " + answer.getDetails());
if (s_logger.isDebugEnabled()) s_logger.debug("copy to primary store failed: " + answer.getDetails());
}
objOnImageStore.processEvent(Event.OperationFailed);
imageStore.delete(objOnImageStore);
@ -471,13 +471,17 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
s_logger.error(errMsg);
answer = new Answer(command, false, errMsg);
} else {
if (s_logger.isDebugEnabled()) s_logger.debug("Sending MIGRATE_COPY request to node " + ep);
answer = ep.sendMessage(command);
if (s_logger.isDebugEnabled()) s_logger.debug("Received MIGRATE_COPY response from node with answer: " + answer);
}
if (answer == null || !answer.getResult()) {
throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool);
} else {
// Update the volume details after migration.
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY updating volume");
VolumeVO volumeVo = volDao.findById(volume.getId());
Long oldPoolId = volume.getPoolId();
volumeVo.setPath(((MigrateVolumeAnswer)answer).getVolumePath());
@ -496,6 +500,8 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
volumeVo.setFolder(folder);
volDao.update(volume.getId(), volumeVo);
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY update volume data complete");
}
return answer;
@ -507,7 +513,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
Answer answer = null;
String errMsg = null;
try {
s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString());
if (s_logger.isDebugEnabled()) s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString());
if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) {
answer = copyVolumeFromSnapshot(srcData, destData);
} else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) {
@ -516,11 +522,16 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
answer = cloneVolume(srcData, destData);
} else if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME &&
srcData.getDataStore().getRole() == DataStoreRole.Primary && destData.getDataStore().getRole() == DataStoreRole.Primary) {
if (s_logger.isDebugEnabled()) s_logger.debug("About to MIGRATE copy between datasources");
if (srcData.getId() == destData.getId()) {
// The volume has to be migrated across storage pools.
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool STARTING");
answer = migrateVolumeToPool(srcData, destData);
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool DONE: " + answer.getResult());
} else {
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools STARTING");
answer = copyVolumeBetweenPools(srcData, destData);
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools DONE: " + answer.getResult());
}
} else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) {
answer = copySnapshot(srcData, destData);
@ -532,7 +543,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
errMsg = answer.getDetails();
}
} catch (Exception e) {
s_logger.debug("copy failed", e);
if (s_logger.isDebugEnabled()) s_logger.debug("copy failed", e);
errMsg = e.toString();
}
CopyCommandResult result = new CopyCommandResult(null, answer);
@ -627,7 +638,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
return answer;
} catch (Exception e) {
s_logger.debug("copy snasphot failed: ", e);
if (s_logger.isDebugEnabled()) s_logger.debug("copy snasphot failed: ", e);
if (cacheData != null) {
cacheMgr.deleteCacheObject(cacheData);
}

View File

@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import com.cloud.agent.api.PrepareForMigrationAnswer;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@ -106,6 +107,7 @@ import com.cloud.storage.Snapshot;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.ProvisioningType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
@ -186,6 +188,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private EndPointSelector selector;
@Inject
VMTemplatePoolDao templatePoolDao;
@Inject
private VolumeDataFactory _volFactory;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
@ -400,15 +404,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
} else if (!isVolumeOnManagedStorage(destVolumeInfo)) {
handleVolumeMigrationFromManagedStorageToNonManagedStorage(srcVolumeInfo, destVolumeInfo, callback);
} else {
String errMsg = "The source volume to migrate and the destination volume are both on managed storage. " +
"Migration in this case is not yet supported.";
handleError(errMsg, callback);
handleVolumeMigrationFromManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback);
}
} else if (!isVolumeOnManagedStorage(destVolumeInfo)) {
String errMsg = "The 'StorageSystemDataMotionStrategy' does not support this migration use case.";
if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) {
String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString());
handleError(errMsg, callback);
} else {
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
}
} else {
handleVolumeMigrationFromNonManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback);
}
@ -453,7 +457,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
String volumePath = null;
try {
if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) {
if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) {
throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " +
"from managed storage to non-managed storage.");
}
@ -485,7 +489,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeCopyFromManagedStorageToSecondaryStorage': " +
ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
CopyCmdAnswer copyCmdAnswer;
@ -512,12 +516,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
}
private void handleVolumeMigrationFromManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
AsyncCompletionCallback<CopyCommandResult> callback) {
if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) {
String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString());
handleError(errMsg, callback);
} else {
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
}
}
private void handleVolumeMigrationFromManagedStorageToNonManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
AsyncCompletionCallback<CopyCommandResult> callback) {
String errMsg = null;
try {
if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) {
if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) {
throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " +
"from managed storage to non-managed storage.");
}
@ -525,10 +539,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
HypervisorType hypervisorType = HypervisorType.KVM;
VirtualMachine vm = srcVolumeInfo.getAttachedVM();
if (vm != null && vm.getState() != VirtualMachine.State.Stopped) {
throw new CloudRuntimeException("Currently, if a volume to migrate from managed storage to non-managed storage is attached to " +
"a VM, the VM must be in the Stopped state.");
}
checkAvailableForMigration(vm);
long destStoragePoolId = destVolumeInfo.getPoolId();
StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(destStoragePoolId);
@ -553,7 +564,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromManagedStorageToNonManagedStorage': " +
ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
CopyCmdAnswer copyCmdAnswer;
@ -579,9 +590,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private void verifyFormatWithPoolType(ImageFormat imageFormat, StoragePoolType poolType) {
if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2 &&
!(imageFormat == ImageFormat.RAW && StoragePoolType.PowerFlex == poolType)) {
throw new CloudRuntimeException("Only the following image types are currently supported: " +
ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", " + ImageFormat.QCOW2.toString() + ", and " + ImageFormat.RAW.toString() + "(for PowerFlex)");
!(imageFormat == ImageFormat.RAW && (StoragePoolType.PowerFlex == poolType ||
StoragePoolType.FiberChannel == poolType))) {
throw new CloudRuntimeException(String.format("Only the following image types are currently supported: %s, %s, %s, %s (for PowerFlex and FiberChannel)",
ImageFormat.VHD.toString(), ImageFormat.OVA.toString(), ImageFormat.QCOW2.toString(), ImageFormat.RAW.toString()));
}
}
@ -685,14 +697,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo);
}
else {
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo);
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
}
}
catch (Exception ex) {
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " +
ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
CopyCmdAnswer copyCmdAnswer;
@ -826,24 +838,73 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
_volumeDao.update(srcVolumeInfo.getId(), volumeVO);
}
private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
VirtualMachine vm = srcVolumeInfo.getAttachedVM();
if (vm != null && vm.getState() != VirtualMachine.State.Stopped) {
throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " +
"a VM, the VM must be in the Stopped state.");
checkAvailableForMigration(vm);
String errMsg = null;
try {
destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null);
VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
updatePathFromScsiName(volumeVO);
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo);
// migrate the volume via the hypervisor
String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage");
updateVolumePath(destVolumeInfo.getId(), path);
volumeVO = _volumeDao.findById(destVolumeInfo.getId());
// only set this if it was not set. default to QCOW2 for KVM
if (volumeVO.getFormat() == null) {
volumeVO.setFormat(ImageFormat.QCOW2);
_volumeDao.update(volumeVO.getId(), volumeVO);
}
} catch (Exception ex) {
errMsg = "Primary storage migration failed due to an unexpected error: " +
ex.getMessage();
if (ex instanceof CloudRuntimeException) {
throw ex;
} else {
throw new CloudRuntimeException(errMsg, ex);
}
} finally {
CopyCmdAnswer copyCmdAnswer;
if (errMsg != null) {
copyCmdAnswer = new CopyCmdAnswer(errMsg);
}
else {
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
DataTO dataTO = destVolumeInfo.getTO();
copyCmdAnswer = new CopyCmdAnswer(dataTO);
}
destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null);
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
result.setResult(errMsg);
callback.complete(result);
}
}
VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
private void checkAvailableForMigration(VirtualMachine vm) {
if (vm != null && (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Migrating)) {
throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " +
"a VM, the VM must be in the Stopped or Migrating state.");
}
}
/**
* Only update the path from the iscsiName if the iscsiName is set; otherwise take no action, to avoid
* overwriting a previously set path value with null.
*/
private void updatePathFromScsiName(VolumeVO volumeVO) {
if (volumeVO.get_iScsiName() != null) {
volumeVO.setPath(volumeVO.get_iScsiName());
_volumeDao.update(volumeVO.getId(), volumeVO);
}
}
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
long srcStoragePoolId = srcVolumeInfo.getPoolId();
StoragePoolVO srcStoragePoolVO = _storagePoolDao.findById(srcStoragePoolId);
@ -856,14 +917,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false);
}
// migrate the volume via the hypervisor
migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage");
volumeVO = _volumeDao.findById(destVolumeInfo.getId());
volumeVO.setFormat(ImageFormat.QCOW2);
_volumeDao.update(volumeVO.getId(), volumeVO);
return hostVO;
}
/**
@ -1075,7 +1129,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
catch (Exception ex) {
errMsg = ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
if (usingBackendSnapshot) {
@ -1293,7 +1347,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
catch (Exception ex) {
errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateManagedVolumeFromNonManagedSnapshot': " + ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION);
@ -1674,6 +1728,42 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return copyCmdAnswer;
}
/**
* Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the snapshot)
* @param snapshotInfo the snapshot to copy into a temporary volume
*/
public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) {
VolumeVO volumeVO = null;
try {
volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP",
snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, "");
volumeVO.setPoolId(snapshotInfo.getDataStore().getId());
_volumeDao.persist(volumeVO);
VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) {
snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null);
// refresh volume info as data could have changed
tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
// save the "temp" volume info into the snapshot details (we need this to clean up at the end)
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true);
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true);
// NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO()
// whenever the TemporaryVolumeCopyPath is set.
} else {
throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so");
}
} catch (Throwable e) {
// cleanup temporary volume
if (volumeVO != null) {
_volumeDao.remove(volumeVO.getId());
}
throw e;
}
}
/**
* If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to
* create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage.
@ -1685,8 +1775,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage.
*/
private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create");
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
prepTempVolumeForCopyFromSnapshot(snapshotInfo);
return;
}
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create");
try {
snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
}
@ -1701,6 +1796,24 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* invocation of createVolumeFromSnapshot(SnapshotInfo).
*/
private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
VolumeVO volumeVO = null;
// cleanup any temporary volume previously created for copy from a snapshot
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
SnapshotDetailsVO tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
if (tempUuid == null || tempUuid.getValue() == null) {
return;
}
volumeVO = _volumeDao.findByUuid(tempUuid.getValue());
if (volumeVO != null) {
_volumeDao.remove(volumeVO.getId());
}
_snapshotDetailsDao.remove(tempUuid.getId());
_snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
return;
}
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete");
try {
@ -1884,9 +1997,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
Answer pfma;
try {
Answer pfma = agentManager.send(destHost.getId(), pfmc);
pfma = agentManager.send(destHost.getId(), pfmc);
if (pfma == null || !pfma.getResult()) {
String details = pfma != null ? pfma.getDetails() : "null answer returned";
@ -1894,8 +2008,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new AgentUnavailableException(msg, destHost.getId());
}
}
catch (final OperationTimedoutException e) {
} catch (final OperationTimedoutException e) {
throw new AgentUnavailableException("Operation timed out", destHost.getId());
}
@ -1911,6 +2024,12 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
migrateCommand.setMigrateStorageManaged(managedStorageDestination);
migrateCommand.setMigrateNonSharedInc(migrateNonSharedInc);
Integer newVmCpuShares = ((PrepareForMigrationAnswer) pfma).getNewVmCpuShares();
if (newVmCpuShares != null) {
LOGGER.debug(String.format("Setting CPU shares to [%d] as part of migrate VM with volumes command for VM [%s].", newVmCpuShares, vmTO));
migrateCommand.setNewVmCpuShares(newVmCpuShares);
}
boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
migrateCommand.setAutoConvergence(kvmAutoConvergence);
@ -2363,7 +2482,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
try {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && !(ImageFormat.RAW.equals(volumeInfo.getFormat()) && StoragePoolType.PowerFlex == storagePoolVO.getPoolType())) {
if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) &&
!(ImageFormat.RAW.equals(volumeInfo.getFormat()) && (
StoragePoolType.PowerFlex == storagePoolVO.getPoolType() ||
StoragePoolType.FiberChannel == storagePoolVO.getPoolType()))) {
throw new CloudRuntimeException("When using managed storage, you can only create a template from a volume on KVM currently.");
}
@ -2506,7 +2628,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
long snapshotId = snapshotInfo.getId();
if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) {
// if the snapshot required a temporary volume be created check if the UUID is set so we can
// retrieve the temporary volume's path to use during remote copy
List<SnapshotDetailsVO> storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath");
if (storedDetails != null && storedDetails.size() > 0) {
String value = storedDetails.get(0).getValue();
snapshotDetails.put(DiskTO.PATH, value);
} else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) {
snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath());
} else {
snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN));
@ -2718,8 +2846,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
private String migrateVolumeForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, HostVO hostVO, String errMsg) {
boolean srcVolumeDetached = srcVolumeInfo.getAttachedVM() == null;
try {
Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo);
Map<String, String> destDetails = getVolumeDetails(destVolumeInfo);
@ -2727,16 +2853,11 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(),
srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value());
if (srcVolumeDetached) {
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
}
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
_volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
MigrateVolumeAnswer migrateVolumeAnswer = (MigrateVolumeAnswer)agentManager.send(hostVO.getId(), migrateVolumeCommand);
if (migrateVolumeAnswer == null || !migrateVolumeAnswer.getResult()) {
if (migrateVolumeAnswer != null && StringUtils.isNotEmpty(migrateVolumeAnswer.getDetails())) {
throw new CloudRuntimeException(migrateVolumeAnswer.getDetails());
@ -2745,42 +2866,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException(errMsg);
}
}
if (srcVolumeDetached) {
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
}
try {
_volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
}
catch (Exception e) {
// This volume should be deleted soon, so just log a warning here.
LOGGER.warn(e.getMessage(), e);
}
return migrateVolumeAnswer.getVolumePath();
}
catch (Exception ex) {
} catch (CloudRuntimeException ex) {
throw ex;
} catch (Exception ex) {
throw new CloudRuntimeException("Unexpected error during volume migration: " + ex.getMessage(), ex);
} finally {
try {
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
}
catch (Exception e) {
// This volume should be deleted soon, so just log a warning here.
LOGGER.warn(e.getMessage(), e);
}
if (srcVolumeDetached) {
_volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
}
String msg = "Failed to perform volume migration : ";
LOGGER.warn(msg, ex);
throw new CloudRuntimeException(msg + ex.getMessage(), ex);
}
finally {
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION);
} catch (Throwable e) {
LOGGER.warn("During cleanup post-migration and exception occured: " + e);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Exception during post-migration cleanup.", e);
}
}
}
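// Sketch of the control flow above: access to the source volume is granted only when it is detached
// (a volume attached to a running VM is already visible to the host), access to the destination is
// always granted, then MigrateVolumeCommand is sent to the agent; the finally block revokes the
// destination (and, for detached volumes, the source) access and resets the migration QoS state,
// logging cleanup failures instead of rethrowing them.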
}

View File

@ -882,9 +882,7 @@ public class VolumeServiceImpl implements VolumeService {
*/
private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, PrimaryDataStore destPrimaryDataStore) {
// create a template volume on primary storage
AsyncCallFuture<VolumeApiResult> createTemplateFuture = new AsyncCallFuture<>();
TemplateInfo templateOnPrimary = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo, srcTemplateInfo.getDeployAsIsConfiguration());
VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), srcTemplateInfo.getDeployAsIsConfiguration());
if (templatePoolRef == null) {
@ -897,7 +895,6 @@ public class VolumeServiceImpl implements VolumeService {
// At this point, we have an entry in the DB that points to our cached template.
// We need to lock it as there may be other VMs that may get started using the same template.
// We want to avoid having to create multiple cache copies of the same template.
int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
long templatePoolRefId = templatePoolRef.getId();
@ -909,28 +906,27 @@ public class VolumeServiceImpl implements VolumeService {
try {
// create a cache volume on the back-end
templateOnPrimary.processEvent(Event.CreateOnlyRequested);
CreateAsyncCompleteCallback callback = new CreateAsyncCompleteCallback();
CreateVolumeContext<CreateCmdResult> createContext = new CreateVolumeContext<>(null, templateOnPrimary, createTemplateFuture);
AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> createCaller = AsyncCallbackDispatcher.create(this);
createCaller.setCallback(createCaller.getTarget().createManagedTemplateImageCallback(null, null)).setContext(createContext);
destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, createCaller);
VolumeApiResult result = createTemplateFuture.get();
if (result.isFailed()) {
String errMesg = result.getResult();
destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, callback);
// validate we got a good result back
if (callback.result == null || callback.result.isFailed()) {
String errMesg;
if (callback.result == null) {
errMesg = "Unknown/unable to determine result";
} else {
errMesg = callback.result.getResult();
}
templateOnPrimary.processEvent(Event.OperationFailed);
throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg);
}
templateOnPrimary.processEvent(Event.OperationSuccessed);
} catch (Throwable e) {
s_logger.debug("Failed to create template volume on storage", e);
templateOnPrimary.processEvent(Event.OperationFailed);
throw new CloudRuntimeException(e.getMessage());
} finally {
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
@ -939,6 +935,17 @@ public class VolumeServiceImpl implements VolumeService {
return templateOnPrimary;
}
private static class CreateAsyncCompleteCallback implements AsyncCompletionCallback<CreateCmdResult> {
public CreateCmdResult result;
@Override
public void complete(CreateCmdResult result) {
this.result = result;
}
}
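// Note: this callback only captures the result in a field, so the calling code above assumes the
// driver's createAsync() completes the callback before returning (a synchronous completion); a driver
// completing it later on another thread would leave callback.result null at the point it is checked.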
/**
* This function copies a template from secondary storage to a template volume
* created on managed storage. This template volume will be used as a cache.
@ -1464,6 +1471,16 @@ public class VolumeServiceImpl implements VolumeService {
if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) {
copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost);
}
} catch (Exception e) {
if (templateOnPrimary != null) {
templateOnPrimary.processEvent(Event.OperationFailed);
}
VolumeApiResult result = new VolumeApiResult(volumeInfo);
result.setResult(e.getLocalizedMessage());
result.setSuccess(false);
future.complete(result);
s_logger.warn("Failed to create template on primary storage", e);
return future;
} finally {
if (lock != null) {
lock.unlock();
@ -1478,8 +1495,8 @@ public class VolumeServiceImpl implements VolumeService {
createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future);
} else {
// We have a template on PowerFlex primary storage. Create new volume and copy to it.
s_logger.debug("Copying the template to the volume on primary storage");
createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future);
createManagedVolumeCopyManagedTemplateAsyncWithLock(volumeInfo, destPrimaryDataStore, templateOnPrimary,
destHost, future, destDataStoreId, srcTemplateInfo.getId());
}
} else {
s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally");
@ -1490,6 +1507,32 @@ public class VolumeServiceImpl implements VolumeService {
return future;
}
private void createManagedVolumeCopyManagedTemplateAsyncWithLock(VolumeInfo volumeInfo, PrimaryDataStore destPrimaryDataStore, TemplateInfo templateOnPrimary,
Host destHost, AsyncCallFuture<VolumeApiResult> future, long destDataStoreId, long srcTemplateId) {
GlobalLock lock = null;
try {
String tmplIdManagedPoolIdDestinationHostLockString = "tmplId:" + srcTemplateId + "managedPoolId:" + destDataStoreId + "destinationHostId:" + destHost.getId();
lock = GlobalLock.getInternLock(tmplIdManagedPoolIdDestinationHostLockString);
if (lock == null) {
throw new CloudRuntimeException("Unable to create volume from template, couldn't get global lock on " + tmplIdManagedPoolIdDestinationHostLockString);
}
int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
if (!lock.lock(storagePoolMaxWaitSeconds)) {
s_logger.debug("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString);
throw new CloudRuntimeException("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString);
}
s_logger.debug("Copying the template to the volume on primary storage");
createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future);
} finally {
if (lock != null) {
lock.unlock();
lock.releaseRef();
}
}
}
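// Illustrative lock key built above (ids hypothetical): "tmplId:42managedPoolId:7destinationHostId:3".
// Serializing on template id + managed pool + destination host prevents concurrent copies of the same
// cached template to the same host, while copies targeting other hosts or pools can proceed in parallel.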
private boolean computeSupportsVolumeClone(long zoneId, HypervisorType hypervisorType) {
if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) {
return true;

View File

@ -61,8 +61,10 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
@Override
public boolean isEnabled() {
if (!roleService.isEnabled()) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker.");
}
}
return roleService.isEnabled();
}
@ -119,7 +121,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
Account userAccount = accountService.getAccount(user.getAccountId());
if (accountService.isRootAdmin(userAccount.getId()) || accountService.isDomainAdmin(userAccount.getAccountId())) {
LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName()));
if (LOGGER.isTraceEnabled()) {
LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName()));
}
return true;
}

View File

@ -73,6 +73,7 @@ import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.apache.xerces.impl.xpath.regex.Match;
@ -485,6 +486,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
*/
private static final String COMMAND_SET_MEM_BALLOON_STATS_PERIOD = "virsh dommemstat %s --period %s --live";
private static int hostCpuMaxCapacity = 0;
private static final int CGROUP_V2_UPPER_LIMIT = 10000;
private static final String COMMAND_GET_CGROUP_HOST_VERSION = "stat -fc %T /sys/fs/cgroup/";
public static final String CGROUP_V2 = "cgroup2fs";
protected long getHypervisorLibvirtVersion() {
return hypervisorLibvirtVersion;
}
@ -565,6 +574,18 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return new ExecutionResult(true, null);
}
/**
* @return the host CPU max capacity according to the method {@link LibvirtComputingResource#calculateHostCpuMaxCapacity(int, Long)}; if the host utilizes cgroup v1, this
* value is 0.
*/
public int getHostCpuMaxCapacity() {
return hostCpuMaxCapacity;
}
public void setHostCpuMaxCapacity(int hostCpuMaxCapacity) {
LibvirtComputingResource.hostCpuMaxCapacity = hostCpuMaxCapacity;
}
public LibvirtKvmAgentHook getTransformer() throws IOException {
return new LibvirtKvmAgentHook(agentHooksBasedir, agentHooksLibvirtXmlScript, agentHooksLibvirtXmlMethod);
}
@ -1044,7 +1065,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
enableSSLForKvmAgent(params);
enableSSLForKvmAgent();
configureLocalStorage();
/* Directory to use for Qemu sockets like for the Qemu Guest Agent */
@ -1353,13 +1374,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
private void enableSSLForKvmAgent(final Map<String, Object> params) {
private void enableSSLForKvmAgent() {
final File keyStoreFile = PropertiesUtil.findConfigFile(KeyStoreUtils.KS_FILENAME);
if (keyStoreFile == null) {
s_logger.info("Failed to find keystore file: " + KeyStoreUtils.KS_FILENAME);
return;
}
String keystorePass = (String)params.get(KeyStoreUtils.KS_PASSPHRASE_PROPERTY);
String keystorePass = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KEYSTORE_PASSPHRASE);
if (StringUtils.isBlank(keystorePass)) {
s_logger.info("Failed to find passphrase for keystore: " + KeyStoreUtils.KS_FILENAME);
return;
@ -2274,7 +2295,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return new Pair<Map<String, Integer>, Integer>(macAddressToNicNum, devNum);
}
protected PowerState convertToPowerState(final DomainState ps) {
public PowerState convertToPowerState(final DomainState ps) {
final PowerState state = POWER_STATES_TABLE.get(ps);
return state == null ? PowerState.PowerUnknown : state;
}
@ -2707,12 +2728,41 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
*/
protected CpuTuneDef createCpuTuneDef(VirtualMachineTO vmTO) {
CpuTuneDef ctd = new CpuTuneDef();
int shares = vmTO.getCpus() * (vmTO.getMinSpeed() != null ? vmTO.getMinSpeed() : vmTO.getSpeed());
ctd.setShares(shares);
ctd.setShares(calculateCpuShares(vmTO));
setQuotaAndPeriod(vmTO, ctd);
return ctd;
}
/**
* Calculates the VM CPU shares considering the cgroup version of the host.
* <ul>
* <li>
* If the host utilizes cgroup v1, the CPU shares are calculated as <b>VM CPU shares = CPU cores * CPU frequency</b>.
* </li>
* <li>
* If the host utilizes cgroup v2, the CPU shares calculation considers the cgroup v2 upper limit of <b>10,000</b> and applies a linear scale conversion
* based on the maximum host CPU shares (i.e. the number of CPU cores and the nominal CPU frequency of the host). Therefore, the VM CPU shares are calculated as
* <b>VM CPU shares = (VM requested shares * cgroup upper limit) / host max shares</b>.
* </li>
* </ul>
*/
public int calculateCpuShares(VirtualMachineTO vmTO) {
int vCpus = vmTO.getCpus();
int cpuSpeed = ObjectUtils.defaultIfNull(vmTO.getMinSpeed(), vmTO.getSpeed());
int requestedCpuShares = vCpus * cpuSpeed;
int hostCpuMaxCapacity = getHostCpuMaxCapacity();
if (hostCpuMaxCapacity > 0) {
int updatedCpuShares = (int) Math.ceil((requestedCpuShares * CGROUP_V2_UPPER_LIMIT) / (double) hostCpuMaxCapacity);
s_logger.debug(String.format("This host utilizes cgroupv2 (as the max shares value is [%s]), thus, the VM requested shares of [%s] will be converted to " +
"consider the host limits; the new CPU shares value is [%s].", hostCpuMaxCapacity, requestedCpuShares, updatedCpuShares));
return updatedCpuShares;
}
s_logger.debug(String.format("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [%s] will not be " +
"converted.", requestedCpuShares));
return requestedCpuShares;
}
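// Worked example (hypothetical figures): a VM with 4 vCPUs at a 500 MHz minimum speed requests
// 4 * 500 = 2000 shares. On a cgroup v2 host with max capacity 8 cores * 2000 MHz = 16000, the
// converted value is ceil(2000 * 10000 / 16000) = 1250 shares; on a cgroup v1 host (max capacity 0)
// the raw 2000 is returned unchanged.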
private CpuModeDef createCpuModeDef(VirtualMachineTO vmTO, int vcpus) {
final CpuModeDef cmd = new CpuModeDef();
cmd.setMode(guestCpuMode);
@ -3548,8 +3598,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
@Override
public StartupCommand[] initialize() {
final KVMHostInfo info = new KVMHostInfo(dom0MinMem, dom0OvercommitMem, manualCpuSpeed, dom0MinCpuCores);
calculateHostCpuMaxCapacity(info.getAllocatableCpus(), info.getCpuSpeed());
String capabilities = String.join(",", info.getCapabilities());
if (dpdkSupport) {
@ -3597,6 +3647,32 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return startupCommandsArray;
}
/**
* Calculates and sets the host CPU max capacity according to the cgroup version of the host.
* <ul>
* <li>
* <b>cgroup v1</b>: the max CPU capacity for the host is set to <b>0</b>.
* </li>
* <li>
* <b>cgroup v2</b>: the max CPU capacity for the host is the value of <b>cpuCores * cpuSpeed</b>.
* </li>
* </ul>
*/
protected void calculateHostCpuMaxCapacity(int cpuCores, Long cpuSpeed) {
String output = Script.runSimpleBashScript(COMMAND_GET_CGROUP_HOST_VERSION);
s_logger.info(String.format("Host uses control group [%s].", output));
if (!CGROUP_V2.equals(output)) {
s_logger.info(String.format("Setting host CPU max capacity to 0, as it uses cgroup v1.", getHostCpuMaxCapacity()));
setHostCpuMaxCapacity(0);
return;
}
s_logger.info(String.format("Calculating the max shares of the host."));
setHostCpuMaxCapacity(cpuCores * cpuSpeed.intValue());
s_logger.info(String.format("The max shares of the host is [%d].", getHostCpuMaxCapacity()));
}
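// For reference, the detection above keys off the filesystem type of /sys/fs/cgroup:
//   $ stat -fc %T /sys/fs/cgroup/
//   cgroup2fs    <- unified hierarchy (cgroup v2); cgroup v1 hosts typically report tmpfs here.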
private StartupStorageCommand createLocalStoragePool(String localStoragePath, String localStorageUUID, StartupRoutingCommand cmd) {
StartupStorageCommand sscmd = null;
try {
@ -3701,7 +3777,39 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
protected List<String> getAllVmNames(final Connect conn) {
/**
* Given a disk path on KVM host, attempts to find source host and path using mount command
* @param diskPath KVM host path for virtual disk
* @return Pair with IP of host and path
*/
public Pair<String, String> getSourceHostPath(String diskPath) {
String sourceHostIp = null;
String sourcePath = null;
try {
String mountResult = Script.runSimpleBashScript("mount | grep \"" + diskPath + "\"");
s_logger.debug("Got mount result for " + diskPath + "\n\n" + mountResult);
if (StringUtils.isNotEmpty(mountResult)) {
String[] res = mountResult.strip().split(" ");
if (res[0].contains(":")) {
res = res[0].split(":");
sourceHostIp = res[0].strip();
sourcePath = res[1].strip();
} else {
// Assume local storage
sourceHostIp = getPrivateIp();
sourcePath = diskPath;
}
}
if (StringUtils.isNotEmpty(sourceHostIp) && StringUtils.isNotEmpty(sourcePath)) {
return new Pair<>(sourceHostIp, sourcePath);
}
} catch (Exception ex) {
s_logger.warn("Failed to list source host and IP for " + diskPath + ex.toString());
}
return null;
}
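// Illustrative mount output and results (values hypothetical):
//   "10.1.1.5:/export/primary on /mnt/2b7a... type nfs (rw)" -> Pair("10.1.1.5", "/export/primary")
//   "/dev/sdb1 on /var/lib/libvirt/images type ext4 (rw)"    -> no ':' in the source, so the host's
//   private IP and the original diskPath are returned as local storage.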
public List<String> getAllVmNames(final Connect conn) {
final ArrayList<String> la = new ArrayList<String>();
try {
final String names[] = conn.listDefinedDomains();
@ -5263,4 +5371,25 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
}
/*
Convert the remote volume to qcow2 on the source host, then scp the converted file to a local directory
*/
public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath) {
try {
String outputFile = UUID.randomUUID().toString();
StringBuilder command = new StringBuilder("qemu-img convert -O qcow2 ");
command.append(remoteFile);
command.append(" "+tmpPath);
command.append(outputFile);
s_logger.debug("Converting remoteFile: "+remoteFile);
SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString());
s_logger.debug("Copying remoteFile to: "+localDir);
SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath+outputFile);
s_logger.debug("Successfully copyied remoteFile to: "+localDir+"/"+outputFile);
return outputFile;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
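// Illustrative sequence (paths hypothetical): with tmpPath "/tmp/" and remoteFile "/mnt/pool/abcd.qcow2",
// the source host runs "qemu-img convert -O qcow2 /mnt/pool/abcd.qcow2 /tmp/<random-uuid>" over SSH and
// the converted file is then scp'd into localDir; note the code assumes tmpPath ends with a path
// separator, since outputFile is appended to it directly.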
}

View File

@ -57,8 +57,10 @@ public class LibvirtDomainXMLParser {
private final List<ChannelDef> channels = new ArrayList<ChannelDef>();
private final List<WatchDogDef> watchDogDefs = new ArrayList<WatchDogDef>();
private Integer vncPort;
private String vncPasswd;
private String desc;
private LibvirtVMDef.CpuTuneDef cpuTuneDef;
private LibvirtVMDef.CpuModeDef cpuModeDef;
private String name;
public boolean parseDomainXML(String domXML) {
@ -278,6 +280,14 @@ public class LibvirtDomainXMLParser {
String name = getAttrValue("target", "name", channel);
String state = getAttrValue("target", "state", channel);
if (ChannelDef.ChannelType.valueOf(type.toUpperCase()).equals(ChannelDef.ChannelType.SPICEVMC)) {
continue;
}
if (path == null) {
path = "";
}
ChannelDef def = null;
if (StringUtils.isBlank(state)) {
def = new ChannelDef(name, ChannelDef.ChannelType.valueOf(type.toUpperCase()), new File(path));
@ -305,6 +315,12 @@ public class LibvirtDomainXMLParser {
vncPort = null;
}
}
String passwd = graphic.getAttribute("passwd");
if (passwd != null) {
vncPasswd = passwd;
}
}
NodeList rngs = devices.getElementsByTagName("rng");
@ -317,6 +333,26 @@ public class LibvirtDomainXMLParser {
String period = getAttrValue("rate", "period", rng);
if (StringUtils.isAnyEmpty(bytes, period)) {
s_logger.debug(String.format("Bytes and period in the rng section should not be null, please check the VM %s", name));
}
if (bytes == null) {
bytes = "0";
}
if (period == null) {
period = "0";
}
if (StringUtils.isEmpty(backendModel)) {
def = new RngDef(path, Integer.parseInt(bytes), Integer.parseInt(period));
} else {
@ -350,7 +386,8 @@ public class LibvirtDomainXMLParser {
watchDogDefs.add(def);
}
extractCpuTuneDef(rootElement);
extractCpuModeDef(rootElement);
return true;
} catch (ParserConfigurationException e) {
s_logger.debug(e.toString());
@ -411,6 +448,10 @@ public class LibvirtDomainXMLParser {
return interfaces;
}
public String getVncPasswd() {
return vncPasswd;
}
public MemBalloonDef getMemBalloon() {
return memBalloonDef;
}
@ -438,4 +479,65 @@ public class LibvirtDomainXMLParser {
public String getName() {
return name;
}
public LibvirtVMDef.CpuTuneDef getCpuTuneDef() {
return cpuTuneDef;
}
public LibvirtVMDef.CpuModeDef getCpuModeDef() {
return cpuModeDef;
}
private void extractCpuTuneDef(final Element rootElement) {
NodeList cpuTunesList = rootElement.getElementsByTagName("cputune");
if (cpuTunesList.getLength() > 0) {
cpuTuneDef = new LibvirtVMDef.CpuTuneDef();
final Element cpuTuneDefElement = (Element) cpuTunesList.item(0);
final String cpuShares = getTagValue("shares", cpuTuneDefElement);
if (StringUtils.isNotBlank(cpuShares)) {
cpuTuneDef.setShares((Integer.parseInt(cpuShares)));
}
final String quota = getTagValue("quota", cpuTuneDefElement);
if (StringUtils.isNotBlank(quota)) {
cpuTuneDef.setQuota((Integer.parseInt(quota)));
}
final String period = getTagValue("period", cpuTuneDefElement);
if (StringUtils.isNotBlank(period)) {
cpuTuneDef.setPeriod((Integer.parseInt(period)));
}
}
}
private void extractCpuModeDef(final Element rootElement){
NodeList cpuModeList = rootElement.getElementsByTagName("cpu");
if (cpuModeList.getLength() > 0){
cpuModeDef = new LibvirtVMDef.CpuModeDef();
final Element cpuModeDefElement = (Element) cpuModeList.item(0);
final String cpuModel = getTagValue("model", cpuModeDefElement);
if (StringUtils.isNotBlank(cpuModel)){
cpuModeDef.setModel(cpuModel);
}
NodeList cpuFeatures = cpuModeDefElement.getElementsByTagName("features");
if (cpuFeatures.getLength() > 0) {
final ArrayList<String> features = new ArrayList<>(cpuFeatures.getLength());
for (int i = 0; i < cpuFeatures.getLength(); i++) {
final Element feature = (Element)cpuFeatures.item(i);
final String policy = feature.getAttribute("policy");
String featureName = feature.getAttribute("name");
if ("disable".equals(policy)) {
featureName = "-" + featureName;
}
features.add(featureName);
}
cpuModeDef.setFeatures(features);
}
final String sockets = getAttrValue("topology", "sockets", cpuModeDefElement);
final String cores = getAttrValue("topology", "cores", cpuModeDefElement);
if (StringUtils.isNotBlank(sockets) && StringUtils.isNotBlank(cores)) {
cpuModeDef.setTopology(Integer.parseInt(cores), Integer.parseInt(sockets));
}
}
}
}

View File

@ -1072,6 +1072,18 @@ public class LibvirtVMDef {
public LibvirtDiskEncryptDetails getLibvirtDiskEncryptDetails() { return this.encryptDetails; }
public String getSourceHost() {
return _sourceHost;
}
public int getSourceHostPort() {
return _sourcePort;
}
public String getSourcePath() {
return _sourcePath;
}
@Override
public String toString() {
StringBuilder diskBuilder = new StringBuilder();
@ -1737,6 +1749,10 @@ public class LibvirtVMDef {
modeBuilder.append("</cpu>");
return modeBuilder.toString();
}
public int getCoresPerSocket() {
return _coresPerSocket;
}
}
public static class SerialDef {
@ -1793,7 +1809,7 @@ public class LibvirtVMDef {
public final static class ChannelDef {
enum ChannelType {
UNIX("unix"), SERIAL("serial");
UNIX("unix"), SERIAL("serial"), SPICEVMC("spicevmc");
String type;
ChannelType(String type) {

View File

@ -0,0 +1,86 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CheckVolumeAnswer;
import com.cloud.agent.api.CheckVolumeCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import org.libvirt.LibvirtException;
import java.util.Map;
@ResourceWrapper(handles = CheckVolumeCommand.class)
public final class LibvirtCheckVolumeCommandWrapper extends CommandWrapper<CheckVolumeCommand, Answer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(LibvirtCheckVolumeCommandWrapper.class);
@Override
public Answer execute(final CheckVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
String result = null;
String srcFile = command.getSrcFile();
StorageFilerTO storageFilerTO = command.getStorageFilerTO();
KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr();
KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid());
try {
if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem ||
storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) {
final KVMPhysicalDisk vol = pool.getPhysicalDisk(srcFile);
final String path = vol.getPath();
long size = getVirtualSizeFromFile(path);
return new CheckVolumeAnswer(command, "", size);
} else {
return new Answer(command, false, "Unsupported Storage Pool");
}
} catch (final Exception e) {
s_logger.error("Error while locating disk: "+ e.getMessage());
return new Answer(command, false, result);
}
}
private long getVirtualSizeFromFile(String path) {
try {
QemuImg qemu = new QemuImg(0);
QemuImgFile qemuFile = new QemuImgFile(path);
Map<String, String> info = qemu.info(qemuFile);
if (info.containsKey(QemuImg.VIRTUAL_SIZE)) {
return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));
} else {
throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path);
}
} catch (QemuImgException | LibvirtException ex) {
throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
}
}
}
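Both this wrapper and the copy wrapper below lean on `qemu.info()` for sizing. A minimal standalone sketch of that call, using only the classes and keys already referenced above (the path is an illustrative placeholder):

// Minimal sketch of querying a volume's virtual size; the path is a placeholder.
QemuImg qemu = new QemuImg(0); // 0 = no command timeout
QemuImgFile file = new QemuImgFile("/mnt/primary/4f2c9bd7.qcow2");
Map<String, String> info = qemu.info(file);
long virtualSize = Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));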

View File

@ -0,0 +1,93 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CopyRemoteVolumeAnswer;
import com.cloud.agent.api.CopyRemoteVolumeCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import org.libvirt.LibvirtException;
import java.util.Map;
@ResourceWrapper(handles = CopyRemoteVolumeCommand.class)
public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper<CopyRemoteVolumeCommand, Answer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(LibvirtCopyRemoteVolumeCommandWrapper.class);
@Override
public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
String result = null;
String srcIp = command.getRemoteIp();
String username = command.getUsername();
String password = command.getPassword();
String srcFile = command.getSrcFile();
StorageFilerTO storageFilerTO = command.getStorageFilerTO();
String tmpPath = command.getTmpPath();
KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr();
KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid());
String dstPath = pool.getLocalPath();
try {
if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem ||
storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) {
String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath);
s_logger.debug("Volume Copy Successful");
final KVMPhysicalDisk vol = pool.getPhysicalDisk(filename);
final String path = vol.getPath();
long size = getVirtualSizeFromFile(path);
return new CopyRemoteVolumeAnswer(command, "", filename, size);
} else {
return new Answer(command, false, "Unsupported Storage Pool");
}
} catch (final Exception e) {
s_logger.error("Error while copying file from remote host: "+ e.getMessage());
return new Answer(command, false, result);
}
}
private long getVirtualSizeFromFile(String path) {
try {
QemuImg qemu = new QemuImg(0);
QemuImgFile qemuFile = new QemuImgFile(path);
Map<String, String> info = qemu.info(qemuFile);
if (info.containsKey(QemuImg.VIRTUAL_SIZE)) {
return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));
} else {
throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path);
}
} catch (QemuImgException | LibvirtException ex) {
throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
}
}
}

View File

@ -0,0 +1,194 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.GetRemoteVmsAnswer;
import com.cloud.agent.api.GetRemoteVmsCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainBlockInfo;
import org.libvirt.DomainInfo;
import org.libvirt.LibvirtException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@ResourceWrapper(handles = GetRemoteVmsCommand.class)
public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper<GetRemoteVmsCommand, Answer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(LibvirtGetRemoteVmsCommandWrapper.class);
@Override
public Answer execute(final GetRemoteVmsCommand command, final LibvirtComputingResource libvirtComputingResource) {
String result = null;
String hypervisorURI = "qemu+tcp://" + command.getRemoteIp() +
"/system";
HashMap<String, UnmanagedInstanceTO> unmanagedInstances = new HashMap<>();
try {
Connect conn = LibvirtConnection.getConnection(hypervisorURI);
final List<String> allVmNames = libvirtComputingResource.getAllVmNames(conn);
for (String name : allVmNames) {
final Domain domain = libvirtComputingResource.getDomain(conn, name);
final DomainInfo.DomainState ps = domain.getInfo().state;
final VirtualMachine.PowerState state = libvirtComputingResource.convertToPowerState(ps);
s_logger.debug("VM " + domain.getName() + ": powerstate = " + ps + "; vm state=" + state.toString());
if (state == VirtualMachine.PowerState.PowerOff) {
try {
UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn);
unmanagedInstances.put(instance.getName(), instance);
} catch (Exception e) {
s_logger.error("Error while fetching instance details", e);
}
}
domain.free();
}
s_logger.debug("Found Vms: "+ unmanagedInstances.size());
return new GetRemoteVmsAnswer(command, "", unmanagedInstances);
} catch (final LibvirtException e) {
s_logger.error("Error while listing stopped Vms on remote host: "+ e.getMessage());
return new Answer(command, false, result);
}
}
private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvirtComputingResource, Domain domain, Connect conn) {
try {
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
parser.parseDomainXML(domain.getXMLDesc(1));
final UnmanagedInstanceTO instance = new UnmanagedInstanceTO();
instance.setName(domain.getName());
if (parser.getCpuModeDef() != null) {
instance.setCpuCoresPerSocket(parser.getCpuModeDef().getCoresPerSocket());
}
Long memory = domain.getMaxMemory();
instance.setMemory(memory.intValue()/1024);
if (parser.getCpuTuneDef() !=null) {
instance.setCpuSpeed(parser.getCpuTuneDef().getShares());
}
instance.setPowerState(getPowerState(libvirtComputingResource.getVmState(conn,domain.getName())));
instance.setNics(getUnmanagedInstanceNics(parser.getInterfaces()));
instance.setDisks(getUnmanagedInstanceDisks(parser.getDisks(),libvirtComputingResource, domain));
instance.setVncPassword(parser.getVncPasswd() + "aaaaaaaaaaaaaa"); // Suffix back extra characters for DB compatibility
return instance;
} catch (Exception e) {
s_logger.debug("Unable to retrieve unmanaged instance info. ", e);
throw new CloudRuntimeException("Unable to retrieve unmanaged instance info. " + e.getMessage());
}
}
private UnmanagedInstanceTO.PowerState getPowerState(VirtualMachine.PowerState vmPowerState) {
switch (vmPowerState) {
case PowerOn:
return UnmanagedInstanceTO.PowerState.PowerOn;
case PowerOff:
return UnmanagedInstanceTO.PowerState.PowerOff;
default:
return UnmanagedInstanceTO.PowerState.PowerUnknown;
}
}
private List<UnmanagedInstanceTO.Nic> getUnmanagedInstanceNics(List<LibvirtVMDef.InterfaceDef> interfaces) {
final ArrayList<UnmanagedInstanceTO.Nic> nics = new ArrayList<>(interfaces.size());
int counter = 0;
for (LibvirtVMDef.InterfaceDef interfaceDef : interfaces) {
final UnmanagedInstanceTO.Nic nic = new UnmanagedInstanceTO.Nic();
nic.setNicId(String.valueOf(counter++));
nic.setMacAddress(interfaceDef.getMacAddress());
nic.setAdapterType(interfaceDef.getModel().toString());
nic.setNetwork(interfaceDef.getDevName());
nic.setPciSlot(interfaceDef.getSlot().toString());
nic.setVlan(interfaceDef.getVlanTag());
nics.add(nic);
}
return nics;
}
private List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<LibvirtVMDef.DiskDef> disksInfo,
LibvirtComputingResource libvirtComputingResource,
Domain dm){
final ArrayList<UnmanagedInstanceTO.Disk> disks = new ArrayList<>(disksInfo.size());
int counter = 0;
for (LibvirtVMDef.DiskDef diskDef : disksInfo) {
if (diskDef.getDeviceType() != LibvirtVMDef.DiskDef.DeviceType.DISK) {
continue;
}
final UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk();
disk.setPosition(counter);
Long size;
try {
DomainBlockInfo blockInfo = dm.blockInfo(diskDef.getSourcePath());
size = blockInfo.getCapacity();
} catch (LibvirtException e) {
throw new RuntimeException(e);
}
disk.setCapacity(size);
disk.setDiskId(String.valueOf(counter++));
disk.setLabel(diskDef.getDiskLabel());
disk.setController(diskDef.getBusType().toString());
Pair<String, String> sourceHostPath = getSourceHostPath(libvirtComputingResource, diskDef.getSourcePath());
if (sourceHostPath != null) {
disk.setDatastoreHost(sourceHostPath.first());
disk.setDatastorePath(sourceHostPath.second());
} else {
disk.setDatastorePath(diskDef.getSourcePath());
disk.setDatastoreHost(diskDef.getSourceHost());
}
disk.setDatastoreType(diskDef.getDiskType().toString());
disk.setDatastorePort(diskDef.getSourceHostPort());
disks.add(disk);
}
return disks;
}
private Pair<String, String> getSourceHostPath(LibvirtComputingResource libvirtComputingResource, String diskPath) {
int pathEnd = diskPath.lastIndexOf("/");
if (pathEnd >= 0) {
diskPath = diskPath.substring(0, pathEnd);
return libvirtComputingResource.getSourceHostPath(diskPath);
}
return null;
}
}

View File

@ -0,0 +1,227 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.GetUnmanagedInstancesAnswer;
import com.cloud.agent.api.GetUnmanagedInstancesCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.LibvirtException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ResourceWrapper(handles=GetUnmanagedInstancesCommand.class)
public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWrapper<GetUnmanagedInstancesCommand, GetUnmanagedInstancesAnswer, LibvirtComputingResource> {
private static final Logger LOGGER = Logger.getLogger(LibvirtGetUnmanagedInstancesCommandWrapper.class);
@Override
public GetUnmanagedInstancesAnswer execute(GetUnmanagedInstancesCommand command, LibvirtComputingResource libvirtComputingResource) {
LOGGER.info("Fetching unmanaged instance on host");
HashMap<String, UnmanagedInstanceTO> unmanagedInstances = new HashMap<>();
try {
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
final Connect conn = libvirtUtilitiesHelper.getConnection();
final List<Domain> domains = getDomains(command, libvirtComputingResource, conn);
for (Domain domain : domains) {
UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn);
if (instance != null) {
    unmanagedInstances.put(instance.getName(), instance);
}
domain.free();
}
} catch (Exception e) {
String err = String.format("Error listing unmanaged instances: %s", e.getMessage());
LOGGER.error(err, e);
return new GetUnmanagedInstancesAnswer(command, err);
}
return new GetUnmanagedInstancesAnswer(command, "OK", unmanagedInstances);
}
private List<Domain> getDomains(GetUnmanagedInstancesCommand command,
LibvirtComputingResource libvirtComputingResource,
Connect conn) throws LibvirtException, CloudRuntimeException {
final List<Domain> domains = new ArrayList<>();
final String vmNameCmd = command.getInstanceName();
if (StringUtils.isNotBlank(vmNameCmd)) {
final Domain domain = libvirtComputingResource.getDomain(conn, vmNameCmd);
if (domain == null) {
String msg = String.format("VM %s not found", vmNameCmd);
LOGGER.error(msg);
throw new CloudRuntimeException(msg);
}
checkIfVmExists(vmNameCmd,domain);
checkIfVmIsManaged(command,vmNameCmd,domain);
domains.add(domain);
} else {
final List<String> allVmNames = libvirtComputingResource.getAllVmNames(conn);
for (String name : allVmNames) {
if (!command.hasManagedInstance(name)) {
final Domain domain = libvirtComputingResource.getDomain(conn, name);
domains.add(domain);
}
}
}
return domains;
}
private void checkIfVmExists(String vmNameCmd,final Domain domain) throws LibvirtException {
if (StringUtils.isNotEmpty(vmNameCmd) &&
!vmNameCmd.equals(domain.getName())) {
LOGGER.error("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd);
throw new CloudRuntimeException("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd);
}
}
private void checkIfVmIsManaged(GetUnmanagedInstancesCommand command,String vmNameCmd,final Domain domain) throws LibvirtException {
if (command.hasManagedInstance(domain.getName())) {
LOGGER.error("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd);
throw new CloudRuntimeException("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd);
}
}
private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvirtComputingResource, Domain domain, Connect conn) {
try {
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
parser.parseDomainXML(domain.getXMLDesc(1));
final UnmanagedInstanceTO instance = new UnmanagedInstanceTO();
instance.setName(domain.getName());
instance.setCpuCores((int) LibvirtComputingResource.countDomainRunningVcpus(domain));
if (parser.getCpuTuneDef() != null) {
    instance.setCpuSpeed(parser.getCpuTuneDef().getShares() / instance.getCpuCores());
}
if (parser.getCpuModeDef() != null) {
instance.setCpuCoresPerSocket(parser.getCpuModeDef().getCoresPerSocket());
}
instance.setPowerState(getPowerState(libvirtComputingResource.getVmState(conn,domain.getName())));
instance.setMemory((int) LibvirtComputingResource.getDomainMemory(domain) / 1024);
instance.setNics(getUnmanagedInstanceNics(parser.getInterfaces()));
instance.setDisks(getUnmanagedInstanceDisks(parser.getDisks(),libvirtComputingResource));
instance.setVncPassword(parser.getVncPasswd() + "aaaaaaaaaaaaaa"); // Suffix back extra characters for DB compatibility
return instance;
} catch (Exception e) {
LOGGER.info("Unable to retrieve unmanaged instance info. " + e.getMessage(), e);
return null;
}
}
private UnmanagedInstanceTO.PowerState getPowerState(VirtualMachine.PowerState vmPowerState) {
switch (vmPowerState) {
case PowerOn:
return UnmanagedInstanceTO.PowerState.PowerOn;
case PowerOff:
return UnmanagedInstanceTO.PowerState.PowerOff;
default:
return UnmanagedInstanceTO.PowerState.PowerUnknown;
}
}
private List<UnmanagedInstanceTO.Nic> getUnmanagedInstanceNics(List<LibvirtVMDef.InterfaceDef> interfaces) {
final ArrayList<UnmanagedInstanceTO.Nic> nics = new ArrayList<>(interfaces.size());
int counter = 0;
for (LibvirtVMDef.InterfaceDef interfaceDef : interfaces) {
final UnmanagedInstanceTO.Nic nic = new UnmanagedInstanceTO.Nic();
nic.setNicId(String.valueOf(counter++));
nic.setMacAddress(interfaceDef.getMacAddress());
nic.setAdapterType(interfaceDef.getModel().toString());
nic.setNetwork(interfaceDef.getDevName());
nic.setPciSlot(interfaceDef.getSlot().toString());
nic.setVlan(interfaceDef.getVlanTag());
nics.add(nic);
}
return nics;
}
private List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<LibvirtVMDef.DiskDef> disksInfo, LibvirtComputingResource libvirtComputingResource){
final ArrayList<UnmanagedInstanceTO.Disk> disks = new ArrayList<>(disksInfo.size());
int counter = 0;
for (LibvirtVMDef.DiskDef diskDef : disksInfo) {
if (diskDef.getDeviceType() != LibvirtVMDef.DiskDef.DeviceType.DISK) {
continue;
}
final UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk();
Long size = null;
String imagePath = null;
try {
QemuImgFile file = new QemuImgFile(diskDef.getSourcePath());
QemuImg qemu = new QemuImg(0);
Map<String, String> info = qemu.info(file);
size = Long.parseLong(info.getOrDefault("virtual_size", "0"));
imagePath = info.getOrDefault("image", null);
} catch (QemuImgException | LibvirtException e) {
throw new RuntimeException(e);
}
disk.setPosition(counter);
disk.setCapacity(size);
disk.setDiskId(String.valueOf(counter++));
disk.setLabel(diskDef.getDiskLabel());
disk.setController(diskDef.getBusType().toString());
Pair<String, String> sourceHostPath = getSourceHostPath(libvirtComputingResource, diskDef.getSourcePath());
if (sourceHostPath != null) {
disk.setDatastoreHost(sourceHostPath.first());
disk.setDatastorePath(sourceHostPath.second());
} else {
disk.setDatastorePath(diskDef.getSourcePath());
disk.setDatastoreHost(diskDef.getSourceHost());
}
disk.setDatastoreType(diskDef.getDiskType().toString());
disk.setDatastorePort(diskDef.getSourceHostPort());
disk.setImagePath(imagePath);
if (imagePath != null) {
    disk.setDatastoreName(imagePath.substring(imagePath.lastIndexOf("/")));
}
disks.add(disk);
}
return disks;
}
private Pair<String, String> getSourceHostPath(LibvirtComputingResource libvirtComputingResource, String diskPath) {
int pathEnd = diskPath.lastIndexOf("/");
if (pathEnd >= 0) {
diskPath = diskPath.substring(0, pathEnd);
return libvirtComputingResource.getSourceHostPath(diskPath);
}
return null;
}
}

View File

@ -23,6 +23,7 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -211,6 +212,8 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
}
}
xmlDesc = updateVmSharesIfNeeded(command, xmlDesc, libvirtComputingResource);
dconn = libvirtUtilitiesHelper.retrieveQemuConnection(destinationUri);
if (to.getType() == VirtualMachine.Type.User) {
@ -362,6 +365,44 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
return new MigrateAnswer(command, result == null, result, null);
}
/**
* Checks if the CPU shares are equal in the source host and destination host.
* <ul>
* <li>
* If both hosts utilize cgroup v1; then, the shares value of the VM is equal in both hosts, and there is no need to update the VM CPU shares value for the
* migration.</li>
* <li>
* If, at least, one of the hosts utilize cgroup v2, the VM CPU shares must be recalculated for the migration, accordingly to
* method {@link LibvirtComputingResource#calculateCpuShares(VirtualMachineTO)}.
* </li>
* </ul>
*/
protected String updateVmSharesIfNeeded(MigrateCommand migrateCommand, String xmlDesc, LibvirtComputingResource libvirtComputingResource)
throws ParserConfigurationException, IOException, SAXException, TransformerException {
Integer newVmCpuShares = migrateCommand.getNewVmCpuShares();
int currentCpuShares = libvirtComputingResource.calculateCpuShares(migrateCommand.getVirtualMachine());
if (newVmCpuShares == null || newVmCpuShares == currentCpuShares) {
s_logger.info(String.format("Current CPU shares [%s] is equal in both hosts; therefore, there is no need to update the CPU shares for the new host.",
currentCpuShares));
return xmlDesc;
}
InputStream inputStream = IOUtils.toInputStream(xmlDesc, StandardCharsets.UTF_8);
DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory();
DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
Document document = docBuilder.parse(inputStream);
Element root = document.getDocumentElement();
Node sharesNode = root.getElementsByTagName("shares").item(0);
String currentShares = sharesNode.getTextContent();
s_logger.info(String.format("VM [%s] will have CPU shares altered from [%s] to [%s] as part of migration because the cgroups version differs between hosts.",
migrateCommand.getVmName(), currentShares, newVmCpuShares));
sharesNode.setTextContent(String.valueOf(newVmCpuShares));
return getXml(document);
}
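To make the effect concrete, a hedged before/after of the domain XML fragment this method rewrites. The share values are illustrative; the real numbers come from calculateCpuShares on each host.

// Hypothetical source-host definition (cgroup v1), before migration:
//   <cputune>
//     <shares>4000</shares>
//   </cputune>
// After updateVmSharesIfNeeded(), with newVmCpuShares recalculated for the
// cgroup v2 destination host (value illustrative):
//   <cputune>
//     <shares>156</shares>
//   </cputune>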
/**
* Replace DPDK source path and target before migrations
*/

View File

@ -279,6 +279,10 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper<MigrateVo
Map<String, String> srcDetails = command.getSrcDetails();
String srcPath = srcDetails != null ? srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath();
// its possible a volume has details but is not using IQN addressing...
if (srcPath == null) {
srcPath = srcVolumeObjectTO.getPath();
}
VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData();
PrimaryDataStoreTO destPrimaryDataStore = (PrimaryDataStoreTO)destVolumeObjectTO.getDataStore();

View File

@ -125,11 +125,7 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
return new PrepareForMigrationAnswer(command, "failed to connect physical disks to host");
}
PrepareForMigrationAnswer answer = new PrepareForMigrationAnswer(command);
if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) {
answer.setDpdkInterfaceMapping(dpdkInterfaceMapping);
}
return answer;
return createPrepareForMigrationAnswer(command, dpdkInterfaceMapping, libvirtComputingResource, vm);
} catch (final LibvirtException | CloudRuntimeException | InternalErrorException | URISyntaxException e) {
if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) {
for (DpdkTO to : dpdkInterfaceMapping.values()) {
@ -146,6 +142,22 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
}
}
protected PrepareForMigrationAnswer createPrepareForMigrationAnswer(PrepareForMigrationCommand command, Map<String, DpdkTO> dpdkInterfaceMapping,
LibvirtComputingResource libvirtComputingResource, VirtualMachineTO vm) {
PrepareForMigrationAnswer answer = new PrepareForMigrationAnswer(command);
if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) {
s_logger.debug(String.format("Setting DPDK interface for the migration of VM [%s].", vm));
answer.setDpdkInterfaceMapping(dpdkInterfaceMapping);
}
int newCpuShares = libvirtComputingResource.calculateCpuShares(vm);
s_logger.debug(String.format("Setting CPU shares to [%s] for the migration of VM [%s].", newCpuShares, vm));
answer.setNewVmCpuShares(newCpuShares);
return answer;
}
private Answer handleRollback(PrepareForMigrationCommand command, LibvirtComputingResource libvirtComputingResource) {
KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
VirtualMachineTO vmTO = command.getVirtualMachine();

View File

@ -0,0 +1,51 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer;
import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
@ResourceWrapper(handles=PrepareUnmanageVMInstanceCommand.class)
public final class LibvirtPrepareUnmanageVMInstanceCommandWrapper extends CommandWrapper<PrepareUnmanageVMInstanceCommand, PrepareUnmanageVMInstanceAnswer, LibvirtComputingResource> {
private static final Logger LOGGER = Logger.getLogger(LibvirtPrepareUnmanageVMInstanceCommandWrapper.class);
@Override
public PrepareUnmanageVMInstanceAnswer execute(PrepareUnmanageVMInstanceCommand command, LibvirtComputingResource libvirtComputingResource) {
final String vmName = command.getInstanceName();
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
LOGGER.debug(String.format("Verify if KVM instance: [%s] is available before Unmanaging VM.", vmName));
try {
final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
final Domain domain = libvirtComputingResource.getDomain(conn, vmName);
if (domain == null) {
LOGGER.error("Prepare Unmanage VMInstanceCommand: vm not found " + vmName);
return new PrepareUnmanageVMInstanceAnswer(command, false, String.format("Cannot find VM with name [%s] in KVM host.", vmName));
}
} catch (Exception e){
LOGGER.error("PrepareUnmanagedInstancesCommand failed due to " + e.getMessage());
return new PrepareUnmanageVMInstanceAnswer(command, false, "Error: " + e.getMessage());
}
return new PrepareUnmanageVMInstanceAnswer(command, true, "OK");
}
}

View File

@ -50,6 +50,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.MultipathSCSIPool;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage.StoragePoolType;
@ -84,6 +85,10 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
KVMStoragePool pool = storagePoolMgr.getStoragePool(spool.getType(), spool.getUuid());
if (pool instanceof MultipathSCSIPool) {
return handleMultipathSCSIResize(command, pool);
}
if (spool.getType().equals(StoragePoolType.PowerFlex)) {
pool.connectPhysicalDisk(volumeId, null);
}
@ -225,4 +230,9 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
}
}
private Answer handleMultipathSCSIResize(ResizeVolumeCommand command, KVMStoragePool pool) {
((MultipathSCSIPool)pool).resize(command.getPath(), command.getInstanceName(), command.getNewSize());
return new ResizeVolumeAnswer(command, true, "");
}
}

View File

@ -39,8 +39,7 @@ public class LibvirtScaleVmCommandWrapper extends CommandWrapper<ScaleVmCommand,
long newMemory = ByteScaleUtils.bytesToKibibytes(vmSpec.getMaxRam());
int newVcpus = vmSpec.getCpus();
int newCpuSpeed = vmSpec.getMinSpeed() != null ? vmSpec.getMinSpeed() : vmSpec.getSpeed();
int newCpuShares = newVcpus * newCpuSpeed;
int newCpuShares = libvirtComputingResource.calculateCpuShares(vmSpec);
String vmDefinition = vmSpec.toString();
String scalingDetails = String.format("%s memory to [%s KiB], CPU cores to [%s] and cpu_shares to [%s]", vmDefinition, newMemory, newVcpus, newCpuShares);

View File

@ -0,0 +1,88 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel)
public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
public FiberChannelAdapter() {
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer");
}
@Override
public KVMStoragePool getStoragePool(String uuid) {
KVMStoragePool pool = MapStorageUuidToStoragePool.get(uuid);
if (pool == null) {
// return a dummy pool - this adapter doesn't care about connectivity information
pool = new MultipathSCSIPool(uuid, this);
MapStorageUuidToStoragePool.put(uuid, pool);
}
LOGGER.info("FiberChannelAdapter return storage pool [" + uuid + "]");
return pool;
}
public String getName() {
return "FiberChannelAdapter";
}
public boolean isStoragePoolTypeSupported(Storage.StoragePoolType type) {
    return Storage.StoragePoolType.FiberChannel.equals(type);
}
@Override
public AddressInfo parseAndValidatePath(String inPath) {
// type=FIBERWWN; address=<address>; connid=<connid>
String type = null;
String address = null;
String connectionId = null;
String path = null;
String[] parts = inPath.split(";");
// handle the initial path format, where only the WWN is provided
if (parts.length == 1) {
type = "FIBERWWN";
address = parts[0];
} else {
for (String part: parts) {
String[] pair = part.split("=");
if (pair.length == 2) {
String key = pair[0].trim();
String value = pair[1].trim();
if (key.equals("type")) {
type = value.toUpperCase();
} else if (key.equals("address")) {
address = value;
} else if (key.equals("connid")) {
connectionId = value;
}
}
}
}
if ("FIBERWWN".equals(type)) {
path = "/dev/mapper/3" + address;
} else {
throw new CloudRuntimeException("Invalid address type provided for target disk: " + type);
}
return new AddressInfo(type, address, connectionId, path);
}
}
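Illustrative inputs and outputs for parseAndValidatePath, covering both path formats accepted above. The WWN is a made-up example value, not from the patch.

// Bare-WWN form, as produced by the initial code path:
FiberChannelAdapter adapter = new FiberChannelAdapter();
AddressInfo a1 = adapter.parseAndValidatePath("600a098038304731683f4d65736b6a00");
// a1.getPath() -> "/dev/mapper/3600a098038304731683f4d65736b6a00"
// Key/value form carrying a connection id:
AddressInfo a2 = adapter.parseAndValidatePath(
        "type=FIBERWWN; address=600a098038304731683f4d65736b6a00; connid=7");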

View File

@ -290,9 +290,12 @@ public class KVMStorageProcessor implements StorageProcessor {
final TemplateObjectTO newTemplate = new TemplateObjectTO();
newTemplate.setPath(primaryVol.getName());
newTemplate.setSize(primaryVol.getSize());
if (primaryPool.getType() == StoragePoolType.RBD ||
primaryPool.getType() == StoragePoolType.PowerFlex ||
primaryPool.getType() == StoragePoolType.Linstor) {
if(List.of(
StoragePoolType.RBD,
StoragePoolType.PowerFlex,
StoragePoolType.Linstor,
StoragePoolType.FiberChannel).contains(primaryPool.getType())) {
newTemplate.setFormat(ImageFormat.RAW);
} else {
newTemplate.setFormat(ImageFormat.QCOW2);
@ -584,7 +587,9 @@ public class KVMStorageProcessor implements StorageProcessor {
public Answer createTemplateFromVolume(final CopyCommand cmd) {
Map<String, String> details = cmd.getOptions();
if (details != null && details.get(DiskTO.IQN) != null) {
// handle cases where the managed storage driver had to make a temporary volume from
// the snapshot in order to support the copy
if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) {
// use the managed-storage approach
return createTemplateFromVolumeOrSnapshot(cmd);
}
@ -712,7 +717,7 @@ public class KVMStorageProcessor implements StorageProcessor {
public Answer createTemplateFromSnapshot(CopyCommand cmd) {
Map<String, String> details = cmd.getOptions();
if (details != null && details.get(DiskTO.IQN) != null) {
if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) {
// use the managed-storage approach
return createTemplateFromVolumeOrSnapshot(cmd);
}
@ -750,12 +755,15 @@ public class KVMStorageProcessor implements StorageProcessor {
KVMStoragePool secondaryStorage = null;
try {
// look for options indicating an overridden path or IQN. Used when snapshots have to be
// temporarily copied on the manaaged storage device before the actual copy to target object
Map<String, String> details = cmd.getOptions();
String path = details != null ? details.get(DiskTO.IQN) : null;
String path = details != null ? details.get(DiskTO.PATH) : null;
if (path == null) {
new CloudRuntimeException("The 'path' field must be specified.");
path = details != null ? details.get(DiskTO.IQN) : null;
if (path == null) {
new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
}
}
storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details);
@ -2188,7 +2196,16 @@ public class KVMStorageProcessor implements StorageProcessor {
Map<String, String> details = cmd.getOptions2();
String path = details != null ? details.get(DiskTO.IQN) : null;
String path = cmd.getDestTO().getPath();
if (path == null) {
path = details != null ? details.get(DiskTO.PATH) : null;
if (path == null) {
path = details != null ? details.get(DiskTO.IQN) : null;
if (path == null) {
new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
}
}
}
storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path, details);

View File

@ -0,0 +1,758 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException;
import org.joda.time.Duration;
public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class);
static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>();
/**
* A lock to avoid any possibility of multiple requests for a scan
*/
static byte[] CLEANUP_LOCK = new byte[0];
/**
* Property keys and defaults
*/
static final Property<Integer> CLEANUP_FREQUENCY_SECS = new Property<Integer>("multimap.cleanup.frequency.secs", 60);
static final Property<Integer> CLEANUP_TIMEOUT_SECS = new Property<Integer>("multimap.cleanup.timeout.secs", 4);
static final Property<Boolean> CLEANUP_ENABLED = new Property<Boolean>("multimap.cleanup.enabled", true);
static final Property<String> CLEANUP_SCRIPT = new Property<String>("multimap.cleanup.script", "cleanStaleMaps.sh");
static final Property<String> CONNECT_SCRIPT = new Property<String>("multimap.connect.script", "connectVolume.sh");
static final Property<String> COPY_SCRIPT = new Property<String>("multimap.copy.script", "copyVolume.sh");
static final Property<String> DISCONNECT_SCRIPT = new Property<String>("multimap.disconnect.script", "disconnectVolume.sh");
static final Property<String> RESIZE_SCRIPT = new Property<String>("multimap.resize.script", "resizeVolume.sh");
static final Property<Integer> DISK_WAIT_SECS = new Property<Integer>("multimap.disk.wait.secs", 240);
static final Property<String> STORAGE_SCRIPTS_DIR = new Property<String>("multimap.storage.scripts.dir", "scripts/storage/multipath");
static Timer cleanupTimer = new Timer();
private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue();
private static String connectScript = CONNECT_SCRIPT.getFinalValue();
private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue();
private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue();
private static String resizeScript = RESIZE_SCRIPT.getFinalValue();
private static String copyScript = COPY_SCRIPT.getFinalValue();
private static int diskWaitTimeSecs = DISK_WAIT_SECS.getFinalValue();
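Each of these reads its final value from the agent's properties, so the defaults above can be overridden per host. A hedged example of such an override (keys taken from the Property constants above; the values are illustrative, not recommendations):

multimap.disk.wait.secs=300
multimap.cleanup.frequency.secs=120
multimap.cleanup.enabled=true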
/**
* Initialize static program-wide configurations and background jobs
*/
static {
long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000;
boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue();
connectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), connectScript);
if (connectScript == null) {
throw new Error("Unable to find the connectVolume.sh script");
}
disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript);
if (disconnectScript == null) {
throw new Error("Unable to find the disconnectVolume.sh script");
}
resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript);
if (resizeScript == null) {
throw new Error("Unable to find the resizeVolume.sh script");
}
copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript);
if (copyScript == null) {
throw new Error("Unable to find the copyVolume.sh script");
}
if (cleanupEnabled) {
cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript);
if (cleanupScript == null) {
throw new Error("Unable to find the cleanStaleMaps.sh script and " + CLEANUP_ENABLED.getName() + " is true");
}
TimerTask task = new TimerTask() {
@Override
public void run() {
try {
MultipathSCSIAdapterBase.cleanupStaleMaps();
} catch (Throwable e) {
LOGGER.warn("Error running stale multipath map cleanup", e);
}
}
};
cleanupTimer = new Timer("MultipathMapCleanupJob");
cleanupTimer.scheduleAtFixedRate(task, 0, cleanupFrequency);
}
}
@Override
public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
return getStoragePool(uuid);
}
public abstract String getName();
public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type);
/**
* We expect WWN values in the volumePath so need to convert it to an actual physical path
*/
public abstract AddressInfo parseAndValidatePath(String path);
@Override
public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) {
LOGGER.debug(String.format("getPhysicalDisk(volumePath,pool) called with args (%s,%s)", volumePath, pool));
if (StringUtils.isEmpty(volumePath) || pool == null) {
LOGGER.error("Unable to get physical disk, volume path or pool not specified");
return null;
}
AddressInfo address = parseAndValidatePath(volumePath);
return getPhysicalDisk(address, pool);
}
private KVMPhysicalDisk getPhysicalDisk(AddressInfo address, KVMStoragePool pool) {
LOGGER.debug(String.format("getPhysicalDisk(addressInfo,pool) called with args (%s,%s)", address.getPath(), pool));
KVMPhysicalDisk disk = new KVMPhysicalDisk(address.getPath(), address.toString(), pool);
disk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
long diskSize = getPhysicalDiskSize(address.getPath());
disk.setSize(diskSize);
disk.setVirtualSize(diskSize);
LOGGER.debug("Physical disk " + disk.getPath() + " with format " + disk.getFormat() + " and size " + disk.getSize() + " provided");
return disk;
}
@Override
public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type, Map<String, String> details) {
LOGGER.info(String.format("createStoragePool(uuid,host,port,path,type) called with args (%s, %s, %s, %s, %s)", uuid, host, ""+port, path, type));
MultipathSCSIPool storagePool = new MultipathSCSIPool(uuid, host, port, path, type, details, this);
MapStorageUuidToStoragePool.put(uuid, storagePool);
return storagePool;
}
@Override
public boolean deleteStoragePool(String uuid) {
return MapStorageUuidToStoragePool.remove(uuid) != null;
}
@Override
public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String, String> details) {
LOGGER.info("connectPhysicalDisk called for [" + volumePath + "]");
if (StringUtils.isEmpty(volumePath)) {
LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined");
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - volume path is underfined");
}
if (pool == null) {
LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set");
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set");
}
AddressInfo address = this.parseAndValidatePath(volumePath);
int waitTimeInSec = diskWaitTimeSecs;
if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) {
String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString());
if (StringUtils.isNotEmpty(waitTime)) {
waitTimeInSec = Integer.parseInt(waitTime);
}
}
return waitForDiskToBecomeAvailable(address, pool, waitTimeInSec);
}
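A hedged caller-side sketch of overriding the disk wait via the details map. The key comes from StorageManager.STORAGE_POOL_DISK_WAIT referenced above; the adapter, pool, and WWN are placeholders.

// Hypothetical call; 'adapter' and 'pool' are placeholders for existing instances.
Map<String, String> details = new HashMap<>();
details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), "120"); // wait up to 120s instead of the default
adapter.connectPhysicalDisk("600a098038304731683f4d65736b6a00", pool, details);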
@Override
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid()));
AddressInfo address = this.parseAndValidatePath(volumePath);
ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase());
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true;
}
@Override
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect);
return false;
}
@Override
public boolean disconnectPhysicalDiskByPath(String localPath) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath));
ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", ""));
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true;
}
@Override
public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) {
LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString()));
return true;
}
@Override
public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool) {
LOGGER.info(String.format("createTemplateFromDisk(disk,name,format,size,destPool) called with args (%s, %s, %s, %s, %s) [not implemented]", disk.getPath(), name, format.toString(), ""+size, destPool.getUuid()));
return null;
}
@Override
public List<KVMPhysicalDisk> listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) {
LOGGER.info(String.format("listPhysicalDisks(uuid,pool) called with args (%s, %s) [not implemented]", storagePoolUuid, pool.getUuid()));
return null;
}
@Override
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) {
return copyPhysicalDisk(disk, name, destPool, timeout, null, null, null);
}
@Override
public boolean refresh(KVMStoragePool pool) {
LOGGER.info(String.format("refresh(pool) called with args (%s)", pool.getUuid()));
return true;
}
@Override
public boolean deleteStoragePool(KVMStoragePool pool) {
LOGGER.info(String.format("deleteStroagePool(pool) called with args (%s)", pool.getUuid()));
return deleteStoragePool(pool.getUuid());
}
@Override
public boolean createFolder(String uuid, String path) {
LOGGER.info(String.format("createFolder(uuid,path) called with args (%s, %s) [not implemented]", uuid, path));
return createFolder(uuid, path, null);
}
@Override
public boolean createFolder(String uuid, String path, String localPath) {
LOGGER.info(String.format("createFolder(uuid,path,localPath) called with args (%s, %s, %s) [not implemented]", uuid, path, localPath));
return true;
}
/**
* Validate inputs and return the source file for a template copy
* @param templateFilePath
* @param destTemplatePath
* @param destPool
* @param format
* @return
*/
File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) {
if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) {
LOGGER.error("Unable to create template from direct download template file due to insufficient data");
throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data");
}
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString());
File sourceFile = new File(templateFilePath);
if (!sourceFile.exists()) {
throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host");
}
if (destTemplatePath == null || destTemplatePath.isEmpty()) {
LOGGER.error("Failed to create template, target template disk path not provided");
throw new CloudRuntimeException("Target template disk path not provided");
}
if (!this.isStoragePoolTypeSupported(destPool.getType())) {
    throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString());
}
if (!Storage.ImageFormat.RAW.equals(format) && !Storage.ImageFormat.QCOW2.equals(format)) {
LOGGER.error("Failed to create template, unsupported template format: " + format.toString());
throw new CloudRuntimeException("Unsupported template format: " + format.toString());
}
return sourceFile;
}
String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) {
String srcTemplateFilePath = templateFilePath;
if (isTemplateExtractable(templateFilePath)) {
srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString();
LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath);
String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath);
Script.runSimpleBashScript(extractCommand);
Script.runSimpleBashScript("rm -f " + templateFilePath);
}
return srcTemplateFilePath;
}
QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) {
if (format == Storage.ImageFormat.RAW) {
return QemuImg.PhysicalDiskFormat.RAW;
} else if (format == Storage.ImageFormat.QCOW2) {
return QemuImg.PhysicalDiskFormat.QCOW2;
} else {
return QemuImg.PhysicalDiskFormat.RAW;
}
}
@Override
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) {
File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format);
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString());
KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath());
return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN);
}
@Override
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout,
byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) {
validateForDiskCopy(disk, name, destPool);
LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat());
KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name);
if (destDisk == null) {
LOGGER.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid());
throw new CloudRuntimeException("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid());
}
if (srcPassphrase != null || dstPassphrase != null) {
throw new CloudRuntimeException("Storage provider does not support user-space encrypted source or destination volumes");
}
destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
destDisk.setVirtualSize(disk.getVirtualSize());
destDisk.setSize(disk.getSize());
LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat());
QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath());
ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName());
int rc = result.getExitCode();
if (rc != 0) {
throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult());
}
LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult());
return destDisk;
}
void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) {
if (StringUtils.isEmpty(name) || disk == null || destPool == null) {
LOGGER.error("Unable to copy physical disk due to insufficient data");
throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data");
}
}
/**
* Copies one disk image to another using the qemu-img convert command.
* @param disk the source disk
* @param destDisk the destination disk
* @param name the destination disk name, used in error messages
* @param timeout timeout for the qemu-img operation
*/
void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) {
QemuImg qemu;
try {
qemu = new QemuImg(timeout);
} catch (LibvirtException | QemuImgException e) {
throw new CloudRuntimeException (e);
}
QemuImgFile srcFile = null;
QemuImgFile destFile = null;
try {
srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
qemu.convert(srcFile, destFile, true);
LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
} catch (QemuImgException | LibvirtException e) {
try {
Map<String, String> srcInfo = qemu.info(srcFile);
LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo));
} catch (Exception ignored) {
LOGGER.warn("Unable to get info from source disk: " + disk.getName());
}
String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage()));
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg, e);
}
}
@Override
public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size,
KVMStoragePool destPool, int timeout, byte[] passphrase) {
throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplate'");
}
@Override
public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template,
String name, PhysicalDiskFormat format, long size,
KVMStoragePool destPool, int timeout, byte[] passphrase) {
throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplateBacking'");
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool,
PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) {
throw new UnsupportedOperationException("Unimplemented method 'createPhysicalDisk'");
}
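/**
* Uses the file(1) utility to determine whether the downloaded template is a
* compressed (bzip2, gzip or zip) archive that needs extraction.
*/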
boolean isTemplateExtractable(String templatePath) {
String type = Script.runSimpleBashScript("file " + templatePath + " | awk -F' ' '{print $2}'");
return type != null && (type.equalsIgnoreCase("bzip2") || type.equalsIgnoreCase("gzip") || type.equalsIgnoreCase("zip"));
}
String getExtractCommandForDownloadedFile(String downloadedTemplateFile, String templateFile) {
if (downloadedTemplateFile.endsWith(".zip")) {
return "unzip -p " + downloadedTemplateFile + " | cat > " + templateFile;
} else if (downloadedTemplateFile.endsWith(".bz2")) {
return "bunzip2 -c " + downloadedTemplateFile + " > " + templateFile;
} else if (downloadedTemplateFile.endsWith(".gz")) {
return "gunzip -c " + downloadedTemplateFile + " > " + templateFile;
} else {
throw new CloudRuntimeException("Unable to extract template " + downloadedTemplateFile);
}
}
private static ScriptResult runScript(String script, long timeout, String...args) {
ScriptResult result = new ScriptResult();
Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
cmd.add(args);
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String output = cmd.execute(parser);
// it's possible the process never launches, which causes an NPE on getExitValue below
if (output != null && output.contains("Unable to execute the command")) {
result.setResult(output);
result.setExitCode(-1);
return result;
}
result.setResult(output);
result.setExitCode(cmd.getExitValue());
return result;
}
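/**
* Repeatedly runs the connect script for the given address/LUN and polls the pool until the
* physical disk is visible with a non-zero size, or until the overall wait time or the retry
* budget is exhausted.
* @param address address of the volume to wait for
* @param pool the storage pool the volume belongs to
* @param waitTimeInSec overall time to wait, in seconds (a minimum of 60 seconds is enforced)
* @return true if the disk became available, false otherwise
*/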
boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
long maxTries = 10; // how many max retries to attempt the script
int timeBetweenTries = 1000; // how long to sleep between tries (ms)
// wait at least 60 seconds even if the input was lower
if (waitTimeInSec < 60) {
waitTimeInSec = 60;
}
long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
KVMPhysicalDisk physicalDisk = null;
// Rescan before checking for the physical disk
int tries = 0;
while (waitTimeInMillis > 0 && tries < maxTries) {
tries++;
long start = System.currentTimeMillis();
String lun;
if (address.getConnectionId() == null) {
lun = "-";
} else {
lun = address.getConnectionId();
}
Process p = null;
try {
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
p = builder.start();
if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
int rc = p.exitValue();
StringBuffer output = new StringBuffer();
if (rc == 0) {
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line = null;
while ((line = input.readLine()) != null) {
output.append(line);
output.append(" ");
}
physicalDisk = getPhysicalDisk(address, pool);
if (physicalDisk != null && physicalDisk.getSize() > 0) {
LOGGER.debug("Found the volume using id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
return true;
}
break;
} else {
LOGGER.warn("Failure discovering LUN via " + connectScript);
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
String line = null;
while ((line = error.readLine()) != null) {
LOGGER.warn("error --> " + line);
}
}
} else {
LOGGER.debug("Timeout waiting for " + connectScript + " to complete - try " + tries);
}
} catch (IOException | InterruptedException | IllegalThreadStateException e) {
LOGGER.warn("Problem performing scan on SCSI hosts - try " + tries, e);
} finally {
if (p != null && p.isAlive()) {
p.destroyForcibly();
}
}
long elapsed = System.currentTimeMillis() - start;
waitTimeInMillis = waitTimeInMillis - elapsed;
try {
Thread.sleep(timeBetweenTries);
} catch (Exception ex) {
// don't do anything
}
}
LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
return false;
}
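/**
* Runs the connect script once for the given LUN and address to trigger a SCSI rescan,
* logging any errors reported by the script.
*/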
void runConnectScript(String lun, AddressInfo address) {
try {
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
Process p = builder.start();
int rc = p.waitFor();
StringBuffer output = new StringBuffer();
if (rc == 0) {
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line = null;
while ((line = input.readLine()) != null) {
output.append(line);
output.append(" ");
}
} else {
LOGGER.warn("Failure discovering LUN via " + connectScript);
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
String line = null;
while ((line = error.readLine()) != null) {
LOGGER.warn("error --> " + line);
}
}
} catch (IOException | InterruptedException e) {
throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e);
}
}
void sleep(long sleepTimeMs) {
try {
Thread.sleep(sleepTimeMs);
} catch (Exception ex) {
// don't do anything
}
}
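/**
* Returns the size in bytes of the block device at the given path using
* "blockdev --getsize64", retrying once if the first attempt reports a
* non-positive size; returns 0 if the size cannot be determined.
*/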
long getPhysicalDiskSize(String diskPath) {
if (StringUtils.isEmpty(diskPath)) {
return 0;
}
Script diskCmd = new Script("blockdev", LOGGER);
diskCmd.add("--getsize64", diskPath);
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String result = diskCmd.execute(parser);
if (result != null) {
LOGGER.debug("Unable to get the disk size at path: " + diskPath);
return 0;
}
Long size = Long.parseLong(parser.getLine());
if (size <= 0) {
// it's possible the path can't be seen on the host yet; rerun the command once more
parser = new OutputInterpreter.OneLineParser();
result = diskCmd.execute(parser);
if (result != null) {
LOGGER.debug("Unable to get the disk size at path: " + diskPath);
return 0;
}
size = Long.parseLong(parser.getLine());
}
return size;
}
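/**
* Resizes the multipath device backing the given volume path by running the resize script
* with the device WWID, the VM name and the new size in bytes.
*/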
public void resize(String path, String vmName, long newSize) {
if (LOGGER.isDebugEnabled()) LOGGER.debug("Executing resize of " + path + " to " + newSize + " bytes for VM " + vmName);
// extract wwid
AddressInfo address = parseAndValidatePath(path);
if (address == null || address.getAddress() == null) {
LOGGER.error("Unable to resize volume, address value is not valid");
throw new CloudRuntimeException("Unable to resize volume, address value is not valid");
}
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("Running %s %s %s %s", resizeScript, address.getAddress(), vmName, newSize));
// call resizeVolume.sh <wwid>
ScriptResult result = runScript(resizeScript, 60000L, address.getAddress(), vmName, ""+newSize);
if (result.getExitCode() != 0) {
throw new CloudRuntimeException("Failed to resize volume at address " + address.getAddress() + " to " + newSize + " bytes for VM " + vmName + ": " + result.getResult());
}
LOGGER.info("Resize of volume at address " + address.getAddress() + " completed successfully: " + result.getResult());
}
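/**
* Runs the multipath cleanup script under a global lock to remove stale multipath maps.
*/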
static void cleanupStaleMaps() {
synchronized(CLEANUP_LOCK) {
long start = System.currentTimeMillis();
ScriptResult result = runScript(cleanupScript, cleanupTimeoutSecs * 1000);
LOGGER.debug("Multipath Cleanup Job elapsed time (ms): "+ (System.currentTimeMillis() - start) + "; result: " + result.getExitCode(), null);
}
}
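/**
* Holder for the parsed components of a volume path: the address type, the address itself
* (e.g. a WWN), an optional connection id (e.g. a LUN) and the original path.
*/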
public static final class AddressInfo {
String type;
String address;
String connectionId;
String path;
public AddressInfo(String type, String address, String connectionId, String path) {
this.type = type;
this.address = address;
this.connectionId = connectionId;
this.path = path;
}
public String getType() {
return type;
}
public String getAddress() {
return address;
}
public String getConnectionId() {
return connectionId;
}
public String getPath() {
return path;
}
public String toString() {
return String.format("type=%s; address=%s; connid=%s", getType(), getAddress(), getConnectionId());
}
}
public static class Property <T> {
private String name;
private T defaultValue;
Property(String name, T value) {
this.name = name;
this.defaultValue = value;
}
public String getName() {
return this.name;
}
public T getDefaultValue() {
return this.defaultValue;
}
public T getFinalValue() {
File agentPropertiesFile = PropertiesUtil.findConfigFile("agent.properties");
if (agentPropertiesFile == null) {
LOGGER.debug(String.format("File [%s] was not found, we will use default defined values. Property [%s]: [%s].", "agent.properties", name, defaultValue));
return defaultValue;
} else {
try {
String configValue = PropertiesUtil.loadFromFile(agentPropertiesFile).getProperty(name);
if (StringUtils.isBlank(configValue)) {
LOGGER.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue));
return defaultValue;
} else {
if (defaultValue instanceof Integer) {
return (T)Integer.valueOf(configValue);
} else if (defaultValue instanceof Long) {
return (T)Long.valueOf(configValue);
} else if (defaultValue instanceof String) {
return (T)configValue;
} else if (defaultValue instanceof Boolean) {
return (T)Boolean.valueOf(configValue);
} else {
return null;
}
}
} catch (IOException e) {
LOGGER.debug(String.format("Failed to get property [%s]. Using default value [%s].", name, defaultValue), e);
return defaultValue;
}
}
}
}
public static class ScriptResult {
private int exitCode = -1;
private String result = null;
public int getExitCode() {
return exitCode;
}
public void setExitCode(int exitCode) {
this.exitCode = exitCode;
}
public String getResult() {
return result;
}
public void setResult(String result) {
this.result = result;
}
}
}

View File

@ -0,0 +1,241 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.joda.time.Duration;
import com.cloud.agent.api.to.HostTO;
import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ProvisioningType;
public class MultipathSCSIPool implements KVMStoragePool {
private String uuid;
private String sourceHost;
private int sourcePort;
private String sourceDir;
private Storage.StoragePoolType storagePoolType;
private StorageAdaptor storageAdaptor;
private long capacity;
private long used;
private long available;
private Map<String, String> details;
public MultipathSCSIPool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, Map<String, String> poolDetails, StorageAdaptor adaptor) {
this.uuid = uuid;
sourceHost = host;
sourcePort = port;
sourceDir = path;
storagePoolType = poolType;
storageAdaptor = adaptor;
capacity = 0;
used = 0;
available = 0;
details = poolDetails;
}
public MultipathSCSIPool(String uuid, StorageAdaptor adapter) {
this.uuid = uuid;
sourceHost = null;
sourcePort = -1;
sourceDir = null;
storagePoolType = Storage.StoragePoolType.FiberChannel;
details = new HashMap<String,String>();
this.storageAdaptor = adapter;
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String arg0, ProvisioningType arg1, long arg2, byte[] arg3) {
return null;
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String arg0, PhysicalDiskFormat arg1, ProvisioningType arg2, long arg3,
byte[] arg4) {
return null;
}
@Override
public boolean connectPhysicalDisk(String volumeUuid, Map<String, String> details) {
return storageAdaptor.connectPhysicalDisk(volumeUuid, this, details);
}
@Override
public KVMPhysicalDisk getPhysicalDisk(String volumeId) {
return storageAdaptor.getPhysicalDisk(volumeId, this);
}
@Override
public boolean disconnectPhysicalDisk(String volumeUuid) {
return storageAdaptor.disconnectPhysicalDisk(volumeUuid, this);
}
@Override
public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) {
return true;
}
@Override
public List<KVMPhysicalDisk> listPhysicalDisks() {
return null;
}
@Override
public String getUuid() {
return uuid;
}
public void setCapacity(long capacity) {
this.capacity = capacity;
}
@Override
public long getCapacity() {
return this.capacity;
}
public void setUsed(long used) {
this.used = used;
}
@Override
public long getUsed() {
return this.used;
}
public void setAvailable(long available) {
this.available = available;
}
@Override
public long getAvailable() {
return this.available;
}
@Override
public boolean refresh() {
return false;
}
@Override
public boolean isExternalSnapshot() {
return true;
}
@Override
public String getLocalPath() {
return null;
}
@Override
public String getSourceHost() {
return this.sourceHost;
}
@Override
public String getSourceDir() {
return this.sourceDir;
}
@Override
public int getSourcePort() {
return this.sourcePort;
}
@Override
public String getAuthUserName() {
return null;
}
@Override
public String getAuthSecret() {
return null;
}
@Override
public Storage.StoragePoolType getType() {
return storagePoolType;
}
@Override
public boolean delete() {
return false;
}
@Override
public QemuImg.PhysicalDiskFormat getDefaultFormat() {
return QemuImg.PhysicalDiskFormat.RAW;
}
@Override
public boolean createFolder(String path) {
return false;
}
@Override
public boolean supportsConfigDriveIso() {
return false;
}
@Override
public Map<String, String> getDetails() {
return this.details;
}
@Override
public boolean isPoolSupportHA() {
return false;
}
@Override
public String getHearthBeatPath() {
return null;
}
@Override
public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp,
boolean hostValidation) {
return null;
}
@Override
public String getStorageNodeId() {
return null;
}
@Override
public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
return null;
}
@Override
public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout,
String volumeUUIDListString, String vmActivityCheckPath, long duration) {
return null;
}
public void resize(String path, String vmName, long newSize) {
((MultipathSCSIAdapterBase)storageAdaptor).resize(path, vmName, newSize);
}
}

View File

@ -6200,4 +6200,99 @@ public class LibvirtComputingResourceTest {
Mockito.verify(loggerMock).debug("Skipping the memory balloon stats period setting for the VM (Libvirt Domain) with ID [1] and name [fake-VM-name] because this"
+ " VM has no memory balloon.");
}
@Test
public void calculateCpuSharesTestMinSpeedNullAndHostCgroupV1ShouldNotConsiderCgroupLimit() {
int cpuCores = 2;
int cpuSpeed = 2000;
int maxCpuShares = 0;
int expectedCpuShares = 4000;
Mockito.doReturn(cpuCores).when(vmTO).getCpus();
Mockito.doReturn(null).when(vmTO).getMinSpeed();
Mockito.doReturn(cpuSpeed).when(vmTO).getSpeed();
Mockito.doReturn(maxCpuShares).when(libvirtComputingResourceSpy).getHostCpuMaxCapacity();
int calculatedCpuShares = libvirtComputingResourceSpy.calculateCpuShares(vmTO);
Assert.assertEquals(expectedCpuShares, calculatedCpuShares);
}
@Test
public void calculateCpuSharesTestMinSpeedNotNullAndHostCgroupV1ShouldNotConsiderCgroupLimit() {
int cpuCores = 2;
int cpuSpeed = 2000;
int maxCpuShares = 0;
int expectedCpuShares = 4000;
Mockito.doReturn(cpuCores).when(vmTO).getCpus();
Mockito.doReturn(cpuSpeed).when(vmTO).getMinSpeed();
Mockito.doReturn(maxCpuShares).when(libvirtComputingResourceSpy).getHostCpuMaxCapacity();
int calculatedCpuShares = libvirtComputingResourceSpy.calculateCpuShares(vmTO);
Assert.assertEquals(expectedCpuShares, calculatedCpuShares);
}
@Test
public void calculateCpuSharesTestMinSpeedNullAndHostCgroupV2ShouldConsiderCgroupLimit() {
int cpuCores = 2;
int cpuSpeed = 2000;
int maxCpuShares = 5000;
int expectedCpuShares = 8000;
Mockito.doReturn(cpuCores).when(vmTO).getCpus();
Mockito.doReturn(null).when(vmTO).getMinSpeed();
Mockito.doReturn(cpuSpeed).when(vmTO).getSpeed();
Mockito.doReturn(maxCpuShares).when(libvirtComputingResourceSpy).getHostCpuMaxCapacity();
int calculatedCpuShares = libvirtComputingResourceSpy.calculateCpuShares(vmTO);
Assert.assertEquals(expectedCpuShares, calculatedCpuShares);
}
@Test
public void calculateCpuSharesTestMinSpeedNotNullAndHostCgroupV2ShouldConsiderCgroupLimit() {
int cpuCores = 2;
int cpuSpeed = 2000;
int maxCpuShares = 5000;
int expectedCpuShares = 8000;
Mockito.doReturn(cpuCores).when(vmTO).getCpus();
Mockito.doReturn(cpuSpeed).when(vmTO).getMinSpeed();
Mockito.doReturn(maxCpuShares).when(libvirtComputingResourceSpy).getHostCpuMaxCapacity();
int calculatedCpuShares = libvirtComputingResourceSpy.calculateCpuShares(vmTO);
Assert.assertEquals(expectedCpuShares, calculatedCpuShares);
}
@Test
public void setMaxHostCpuSharesIfCGroupV2TestShouldCalculateMaxCpuCapacityIfHostUtilizesCgroupV2() {
int cpuCores = 2;
long cpuSpeed = 2500L;
int expectedShares = 5000;
String hostCgroupVersion = LibvirtComputingResource.CGROUP_V2;
try (MockedStatic<Script> ignored = Mockito.mockStatic(Script.class)) {
Mockito.when(Script.runSimpleBashScript(Mockito.anyString())).thenReturn(hostCgroupVersion);
libvirtComputingResourceSpy.calculateHostCpuMaxCapacity(cpuCores, cpuSpeed);
Assert.assertEquals(expectedShares, libvirtComputingResourceSpy.getHostCpuMaxCapacity());
}
}
@Test
public void setMaxHostCpuSharesIfCGroupV2TestShouldNotCalculateMaxCpuCapacityIfHostDoesNotUtilizesCgroupV2() {
int cpuCores = 2;
long cpuSpeed = 2500L;
int expectedShares = 0;
String hostCgroupVersion = "tmpfs";
try (MockedStatic<Script> ignored = Mockito.mockStatic(Script.class)) {
Mockito.when(Script.runSimpleBashScript(Mockito.anyString())).thenReturn(hostCgroupVersion);
libvirtComputingResourceSpy.calculateHostCpuMaxCapacity(cpuCores, cpuSpeed);
Assert.assertEquals(expectedShares, libvirtComputingResourceSpy.getHostCpuMaxCapacity());
}
}
}

View File

@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@ -36,6 +37,8 @@ import javax.xml.transform.TransformerException;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import org.apache.cloudstack.utils.security.ParserUtils;
import org.apache.commons.io.IOUtils;
import org.junit.Assert;
import org.junit.Test;
@ -43,10 +46,14 @@ import org.junit.runner.RunWith;
import org.libvirt.Connect;
import org.libvirt.StorageVol;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.xml.sax.SAXException;
import com.cloud.agent.api.MigrateCommand;
@ -55,6 +62,7 @@ import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo.DiskType;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo.DriverType;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo.Source;
import com.cloud.agent.api.to.DpdkTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
@ -437,6 +445,16 @@ public class LibvirtMigrateCommandWrapperTest {
" </seclabel>\n" +
"</domain>";
@Mock
MigrateCommand migrateCommandMock;
@Mock
LibvirtComputingResource libvirtComputingResourceMock;
@Mock
VirtualMachineTO virtualMachineTOMock;
@Spy
LibvirtMigrateCommandWrapper libvirtMigrateCmdWrapper = new LibvirtMigrateCommandWrapper();
final String memInfo = "MemTotal: 5830236 kB\n" +
@ -860,4 +878,67 @@ public class LibvirtMigrateCommandWrapperTest {
Assert.assertTrue(replaced.contains("csdpdk-7"));
Assert.assertFalse(replaced.contains("csdpdk-1"));
}
@Test
public void updateVmSharesIfNeededTestNewCpuSharesEqualCurrentSharesShouldNotUpdateVmShares() throws ParserConfigurationException, IOException, TransformerException,
SAXException {
int newVmCpuShares = 1000;
int currentVmCpuShares = 1000;
Mockito.doReturn(newVmCpuShares).when(migrateCommandMock).getNewVmCpuShares();
Mockito.doReturn(virtualMachineTOMock).when(migrateCommandMock).getVirtualMachine();
Mockito.doReturn(currentVmCpuShares).when(libvirtComputingResourceMock).calculateCpuShares(virtualMachineTOMock);
String finalXml = libvirtMigrateCmdWrapper.updateVmSharesIfNeeded(migrateCommandMock, fullfile, libvirtComputingResourceMock);
Assert.assertEquals(finalXml, fullfile);
}
@Test
public void updateVmSharesIfNeededTestNewCpuSharesHigherThanCurrentSharesShouldUpdateVmShares() throws ParserConfigurationException, IOException, TransformerException,
SAXException {
int newVmCpuShares = 2000;
int currentVmCpuShares = 1000;
Mockito.doReturn(newVmCpuShares).when(migrateCommandMock).getNewVmCpuShares();
Mockito.doReturn(virtualMachineTOMock).when(migrateCommandMock).getVirtualMachine();
Mockito.doReturn(currentVmCpuShares).when(libvirtComputingResourceMock).calculateCpuShares(virtualMachineTOMock);
String finalXml = libvirtMigrateCmdWrapper.updateVmSharesIfNeeded(migrateCommandMock, fullfile, libvirtComputingResourceMock);
InputStream inputStream = IOUtils.toInputStream(finalXml, StandardCharsets.UTF_8);
DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory();
DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
Document document = docBuilder.parse(inputStream);
Element root = document.getDocumentElement();
Node sharesNode = root.getElementsByTagName("shares").item(0);
int updateShares = Integer.parseInt(sharesNode.getTextContent());
Assert.assertEquals(updateShares, newVmCpuShares);
}
@Test
public void updateVmSharesIfNeededTestNewCpuSharesLowerThanCurrentSharesShouldUpdateVmShares() throws ParserConfigurationException, IOException, TransformerException,
SAXException {
int newVmCpuShares = 500;
int currentVmCpuShares = 1000;
Mockito.doReturn(newVmCpuShares).when(migrateCommandMock).getNewVmCpuShares();
Mockito.doReturn(virtualMachineTOMock).when(migrateCommandMock).getVirtualMachine();
Mockito.doReturn(currentVmCpuShares).when(libvirtComputingResourceMock).calculateCpuShares(virtualMachineTOMock);
String finalXml = libvirtMigrateCmdWrapper.updateVmSharesIfNeeded(migrateCommandMock, fullfile, libvirtComputingResourceMock);
InputStream inputStream = IOUtils.toInputStream(finalXml, StandardCharsets.UTF_8);
DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory();
DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
Document document = docBuilder.parse(inputStream);
Element root = document.getDocumentElement();
Node sharesNode = root.getElementsByTagName("shares").item(0);
int updateShares = Integer.parseInt(sharesNode.getTextContent());
Assert.assertEquals(updateShares, newVmCpuShares);
}
}

View File

@ -0,0 +1,73 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.PrepareForMigrationAnswer;
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.to.DpdkTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.HashMap;
import java.util.Map;
@RunWith(MockitoJUnitRunner.class)
public class LibvirtPrepareForMigrationCommandWrapperTest {
@Mock
LibvirtComputingResource libvirtComputingResourceMock;
@Mock
PrepareForMigrationCommand prepareForMigrationCommandMock;
@Mock
VirtualMachineTO virtualMachineTOMock;
@Spy
LibvirtPrepareForMigrationCommandWrapper libvirtPrepareForMigrationCommandWrapperSpy = new LibvirtPrepareForMigrationCommandWrapper();
@Test
public void createPrepareForMigrationAnswerTestDpdkInterfaceNotEmptyShouldSetParamOnAnswer() {
Map<String, DpdkTO> dpdkInterfaceMapping = new HashMap<>();
dpdkInterfaceMapping.put("Interface", new DpdkTO());
PrepareForMigrationAnswer prepareForMigrationAnswer = libvirtPrepareForMigrationCommandWrapperSpy.createPrepareForMigrationAnswer(prepareForMigrationCommandMock, dpdkInterfaceMapping, libvirtComputingResourceMock,
virtualMachineTOMock);
Assert.assertEquals(prepareForMigrationAnswer.getDpdkInterfaceMapping(), dpdkInterfaceMapping);
}
@Test
public void createPrepareForMigrationAnswerTestVerifyThatCpuSharesIsSet() {
int cpuShares = 1000;
Mockito.doReturn(cpuShares).when(libvirtComputingResourceMock).calculateCpuShares(virtualMachineTOMock);
PrepareForMigrationAnswer prepareForMigrationAnswer = libvirtPrepareForMigrationCommandWrapperSpy.createPrepareForMigrationAnswer(prepareForMigrationCommandMock,null,
libvirtComputingResourceMock, virtualMachineTOMock);
Assert.assertEquals(cpuShares, prepareForMigrationAnswer.getNewVmCpuShares().intValue());
}
}

View File

@ -214,9 +214,11 @@ public class LibvirtScaleVmCommandWrapperTest extends TestCase {
@Test
public void validateExecuteHandleLibvirtException() throws LibvirtException {
String errorMessage = "";
int shares = vmTo.getCpus() * vmTo.getSpeed();
Mockito.doReturn(vmTo).when(scaleVmCommandMock).getVirtualMachine();
Mockito.doReturn(libvirtUtilitiesHelperMock).when(libvirtComputingResourceMock).getLibvirtUtilitiesHelper();
Mockito.doReturn(shares).when(libvirtComputingResourceMock).calculateCpuShares(vmTo);
Mockito.doThrow(libvirtException).when(libvirtUtilitiesHelperMock).getConnectionByVmName(Mockito.anyString());
Mockito.doReturn(errorMessage).when(libvirtException).getMessage();
@ -229,9 +231,12 @@ public class LibvirtScaleVmCommandWrapperTest extends TestCase {
@Test
public void validateExecuteSuccessfully() throws LibvirtException {
int shares = vmTo.getCpus() * vmTo.getSpeed();
Mockito.doReturn(vmTo).when(scaleVmCommandMock).getVirtualMachine();
Mockito.doReturn(libvirtUtilitiesHelperMock).when(libvirtComputingResourceMock).getLibvirtUtilitiesHelper();
Mockito.doReturn(connectMock).when(libvirtUtilitiesHelperMock).getConnectionByVmName(Mockito.anyString());
Mockito.doReturn(shares).when(libvirtComputingResourceMock).calculateCpuShares(vmTo);
Mockito.doReturn(domainMock).when(connectMock).domainLookupByName(Mockito.anyString());
Mockito.doNothing().when(libvirtScaleVmCommandWrapperSpy).scaleMemory(Mockito.any(), Mockito.anyLong(), Mockito.anyString());
Mockito.doNothing().when(libvirtScaleVmCommandWrapperSpy).scaleVcpus(Mockito.any(), Mockito.anyInt(), Mockito.anyString());

View File

@ -54,6 +54,7 @@ import com.vmware.vim25.FileQueryFlags;
import com.vmware.vim25.FolderFileInfo;
import com.vmware.vim25.HostDatastoreBrowserSearchResults;
import com.vmware.vim25.HostDatastoreBrowserSearchSpec;
import com.vmware.vim25.VirtualMachineConfigSummary;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
@ -7156,6 +7157,8 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
}
UnmanagedInstanceTO instance = VmwareHelper.getUnmanagedInstance(hyperHost, vmMo);
if (instance != null) {
VirtualMachineConfigSummary configSummary = vmMo.getConfigSummary();
instance.setCpuSpeed(configSummary != null ? configSummary.getCpuReservation() : 0);
unmanagedInstances.put(instance.getName(), instance);
}
}

View File

@ -783,12 +783,12 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
return prefix;
}
protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory,
final Long size, final Long serviceOfferingId, final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) {
protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, final Long size,
final Long serviceOfferingId, final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) {
return Transaction.execute(new TransactionCallback<KubernetesClusterVO>() {
@Override
public KubernetesClusterVO doInTransaction(TransactionStatus status) {
KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId());
KubernetesClusterVO updatedCluster = kubernetesClusterDao.findById(kubernetesCluster.getId());
if (cores != null) {
updatedCluster.setCores(cores);
}

View File

@ -32,7 +32,6 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Level;
import com.cloud.dc.DataCenter;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ManagementServerException;
import com.cloud.exception.NetworkRuleConflictException;
@ -299,8 +298,8 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
boolean result = false;
try {
result = userVmManager.upgradeVirtualMachine(userVM.getId(), serviceOffering.getId(), new HashMap<String, String>());
} catch (ResourceUnavailableException | ManagementServerException | ConcurrentOperationException | VirtualMachineMigrationException e) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, unable to scale cluster VM : %s", kubernetesCluster.getName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
} catch (RuntimeException | ResourceUnavailableException | ManagementServerException | VirtualMachineMigrationException e) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, unable to scale cluster VM : %s due to %s", kubernetesCluster.getName(), userVM.getDisplayName(), e.getMessage()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
}
if (!result) {
logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster : %s failed, unable to scale cluster VM : %s", kubernetesCluster.getName(), userVM.getDisplayName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);

View File

@ -91,7 +91,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
}
try {
result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(), sshKeyFile, null,
String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-emptydir-data", hostName),
10000, 10000, 60000);
} catch (Exception e) {
logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e);

View File

@ -137,7 +137,7 @@ if [ -d "$BINARIES_DIR" ]; then
systemctl stop kubelet
cp -a ${BINARIES_DIR}/k8s/{kubelet,kubectl} /opt/bin
chmod +x {kubelet,kubectl}
chmod +x /opt/bin/{kubelet,kubectl}
systemctl daemon-reload
systemctl restart containerd

View File

@ -82,6 +82,24 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
private static final String ONLINE = "online";
private static final String OFFLINE = "offline";
enum MissingInfoFilter {
Host_Stats("hostStats"),
CPU_CAPACITY("cpuCapacity"),
MEM_CAPACITY("memCapacity"),
CORE_CAPACITY("coreCapacity");
private final String name;
MissingInfoFilter(String name){
this.name = name;
}
@Override
public String toString() {
return name;
}
}
private static List<Item> metricsItems = new ArrayList<>();
@Inject
@ -129,8 +147,6 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
Map<String, Integer> upHosts = new HashMap<>();
Map<String, Integer> downHosts = new HashMap<>();
HostStats hostStats;
for (final HostVO host : hostDao.listAll()) {
if (host == null || host.getType() != Host.Type.Routing || host.getDataCenterId() != dcId) {
continue;
@ -147,8 +163,6 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
int isDedicated = (dr != null) ? 1 : 0;
metricsList.add(new ItemHostIsDedicated(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), isDedicated));
String hostTags = markTagMaps(host, totalHosts, upHosts, downHosts);
hostStats = ApiDBUtils.getHostStatistics(host.getId());
// Get account, domain details for dedicated hosts
if (isDedicated == 1) {
@ -160,16 +174,22 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
metricsList.add(new ItemHostDedicatedToAccount(zoneName, host.getName(), accountName, domain.getPath(), isDedicated));
}
String hostTags = markTagMaps(host, totalHosts, upHosts, downHosts);
HostStats hostStats = ApiDBUtils.getHostStatistics(host.getId());
if (hostStats == null){
metricsList.add(new MissingHostInfo(zoneName, host.getName(), MissingInfoFilter.Host_Stats));
}
final String cpuFactor = String.valueOf(CapacityManager.CpuOverprovisioningFactor.valueIn(host.getClusterId()));
final CapacityVO cpuCapacity = capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU);
final double cpuUsedMhz = hostStats.getCpuUtilization() * host.getCpus() * host.getSpeed() / 100.0 ;
if (host.isInMaintenanceStates()) {
metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, ALLOCATED, 0L, isDedicated, hostTags));
metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, USED, 0L, isDedicated, hostTags));
metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, TOTAL, 0L, isDedicated, hostTags));
if (cpuCapacity == null && !host.isInMaintenanceStates()){
metricsList.add(new MissingHostInfo(zoneName, host.getName(), MissingInfoFilter.CPU_CAPACITY));
}
else if (cpuCapacity != null && cpuCapacity.getCapacityState() == CapacityState.Enabled) {
if (hostStats != null && cpuCapacity != null && cpuCapacity.getCapacityState() == CapacityState.Enabled) {
final double cpuUsedMhz = hostStats.getCpuUtilization() * host.getCpus() * host.getSpeed() / 100.0 ;
metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, ALLOCATED, cpuCapacity.getUsedCapacity(), isDedicated, hostTags));
metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, USED, cpuUsedMhz, isDedicated, hostTags));
metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, TOTAL, cpuCapacity.getTotalCapacity(), isDedicated, hostTags));
@ -181,12 +201,12 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
final String memoryFactor = String.valueOf(CapacityManager.MemOverprovisioningFactor.valueIn(host.getClusterId()));
final CapacityVO memCapacity = capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY);
if (host.isInMaintenanceStates()) {
metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, ALLOCATED, 0L, isDedicated, hostTags));
metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, USED, 0, isDedicated, hostTags));
metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, TOTAL, 0L, isDedicated, hostTags));
if (memCapacity == null && !host.isInMaintenanceStates()){
metricsList.add(new MissingHostInfo(zoneName, host.getName(), MissingInfoFilter.MEM_CAPACITY));
}
else if (memCapacity != null && memCapacity.getCapacityState() == CapacityState.Enabled) {
if (hostStats != null && memCapacity != null && memCapacity.getCapacityState() == CapacityState.Enabled) {
metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, ALLOCATED, memCapacity.getUsedCapacity(), isDedicated, hostTags));
metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, USED, hostStats.getUsedMemory(), isDedicated, hostTags));
metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, TOTAL, memCapacity.getTotalCapacity(), isDedicated, hostTags));
@ -197,13 +217,13 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
}
metricsList.add(new ItemHostVM(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), vmDao.listByHostId(host.getId()).size()));
final CapacityVO coreCapacity = capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU_CORE);
if (host.isInMaintenanceStates()) {
metricsList.add(new ItemVMCore(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), USED, 0L, isDedicated, hostTags));
metricsList.add(new ItemVMCore(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), TOTAL, 0L, isDedicated, hostTags));
if (coreCapacity == null && !host.isInMaintenanceStates()){
metricsList.add(new MissingHostInfo(zoneName, host.getName(), MissingInfoFilter.CORE_CAPACITY));
}
else if (coreCapacity != null && coreCapacity.getCapacityState() == CapacityState.Enabled) {
if (hostStats != null && coreCapacity != null && coreCapacity.getCapacityState() == CapacityState.Enabled) {
metricsList.add(new ItemVMCore(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), USED, coreCapacity.getUsedCapacity(), isDedicated, hostTags));
metricsList.add(new ItemVMCore(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), TOTAL, coreCapacity.getTotalCapacity(), isDedicated, hostTags));
} else {
@ -213,17 +233,17 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
}
final List<CapacityDaoImpl.SummedCapacity> cpuCapacity = capacityDao.findCapacityBy((int) Capacity.CAPACITY_TYPE_CPU, dcId, null, null);
if (cpuCapacity != null && cpuCapacity.size() > 0) {
if (cpuCapacity != null && !cpuCapacity.isEmpty()) {
metricsList.add(new ItemHostCpu(zoneName, zoneUuid, null, null, null, null, ALLOCATED, cpuCapacity.get(0).getAllocatedCapacity() != null ? cpuCapacity.get(0).getAllocatedCapacity() : 0, 0, ""));
}
final List<CapacityDaoImpl.SummedCapacity> memCapacity = capacityDao.findCapacityBy((int) Capacity.CAPACITY_TYPE_MEMORY, dcId, null, null);
if (memCapacity != null && memCapacity.size() > 0) {
if (memCapacity != null && !memCapacity.isEmpty()) {
metricsList.add(new ItemHostMemory(zoneName, zoneUuid, null, null, null, null, ALLOCATED, memCapacity.get(0).getAllocatedCapacity() != null ? memCapacity.get(0).getAllocatedCapacity() : 0, 0, ""));
}
final List<CapacityDaoImpl.SummedCapacity> coreCapacity = capacityDao.findCapacityBy((int) Capacity.CAPACITY_TYPE_CPU_CORE, dcId, null, null);
if (coreCapacity != null && coreCapacity.size() > 0) {
if (coreCapacity != null && !coreCapacity.isEmpty()) {
metricsList.add(new ItemVMCore(zoneName, zoneUuid, null, null, null, ALLOCATED, coreCapacity.get(0).getAllocatedCapacity() != null ? coreCapacity.get(0).getAllocatedCapacity() : 0, 0, ""));
}
@ -626,6 +646,25 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
}
}
class MissingHostInfo extends Item {
String zoneName;
String hostName;
MissingInfoFilter filter;
public MissingHostInfo(String zoneName, String hostname, MissingInfoFilter filter) {
super("cloudstack_host_missing_info");
this.zoneName = zoneName;
this.hostName = hostname;
this.filter = filter;
}
@Override
public String toMetricsString() {
return String.format("%s{zone=\"%s\",hostname=\"%s\",filter=\"%s\"} -1", name, zoneName, hostName, filter);
}
}
class ItemHostCpu extends Item {
String zoneName;
String zoneUuid;

View File

@ -134,6 +134,9 @@
<module>storage/volume/scaleio</module>
<module>storage/volume/linstor</module>
<module>storage/volume/storpool</module>
<module>storage/volume/adaptive</module>
<module>storage/volume/flasharray</module>
<module>storage/volume/primera</module>
<module>storage/object/minio</module>
<module>storage/object/simulator</module>

View File

@ -0,0 +1,58 @@
# CloudStack Volume Provider Adaptive Plugin Base
The Adaptive Plugin Base is an abstract volume storage provider that
supplies a generic implementation for managing volumes that are exposed
to hosts through FiberChannel and similar methods but managed independently
through a storage API or interface. The ProviderAdapter and its associated
classes provide an interface, decoupled from the rest of CloudStack, that
covers exactly the actions needed to interface with a storage provider.
Each storage provider can implement the ProviderAdapter without needing to
understand the internal logic of volume management, database structure, etc.
## Implement the Provider Interface
To implement a provider, create another module -- or a standalone project --
and implement the following interfaces from the **org.apache.cloudstack.storage.datastore.adapter** package:
1. **ProviderAdapter** - this is the primary interface used to communicate with the storage provider when volume management actions are required.
2. **ProviderAdapterFactory** - the implementation of this class creates the correct ProviderAdapter when needed.
Follow the Javadoc of each class for further instructions on implementing each function. A partial sketch of an adapter implementation is shown below.
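As a rough illustration, a provider adapter implementation might start like this sketch. The `AcmeApiClient` class and its `ping()` call are hypothetical placeholders for your storage array's API client, and only a few ProviderAdapter methods are shown, so the class is left abstract for brevity:
```
package org.apache.cloudstack.storage.datastore.adapter.acme; // hypothetical package

import java.util.Map;

import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;

public abstract class AcmeProviderAdapter implements ProviderAdapter {
    private AcmeApiClient client; // hypothetical API client for the array
    private Map<String, String> details;

    @Override
    public void refresh(Map<String, String> details) {
        // re-create the API client whenever the connection details change
        this.details = details;
        this.client = new AcmeApiClient(details.get(API_URL_KEY),
                details.get(API_USERNAME_KEY), details.get(API_PASSWORD_KEY));
    }

    @Override
    public void validate() {
        // throw if the array is not reachable with the configured details
        if (client == null || !client.ping()) { // ping() is a hypothetical health check
            throw new RuntimeException("Acme array is not reachable with the configured details");
        }
    }

    @Override
    public void disconnect() {
        client = null;
    }
}
```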
## Implement the Primary Datastore Provider Plugin
Once the provider interface is implemented, you will need to extend the **org.apache.cloudstack.storage.datastore.provider.AdaptiveProviderDatastoreProviderImpl** class. When extending it, you only need to implement a default
constructor that creates an instance of the ProviderAdapterFactory implementation from item 2 above and passes it to the parent constructor, as in the sketch below.
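For example, a minimal sketch for a hypothetical "Acme" provider, assuming the factory class from the previous step is named `AcmeProviderAdapterFactory` and that the parent constructor accepts the factory object as described above:
```
package org.apache.cloudstack.storage.datastore.provider; // hypothetical placement

public class AcmePrimaryDatastoreProviderImpl extends AdaptiveProviderDatastoreProviderImpl {
    public AcmePrimaryDatastoreProviderImpl() {
        // pass the provider-specific factory to the adaptive base provider
        super(new AcmeProviderAdapterFactory());
    }
}
```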
## Provide the Configuration for the Provider Plugin
Lastly, you need to include a module file and Spring configuration for your Primary Datastore Provider Plugin class so CloudStack will load it during startup.
### Module Properties
This tells CloudStack to load the plugin as a module during startup.
```
#resources/META-INF/cloudstack/storage-volume-<providername>/module.properties
name=storage-volume-<providername>
parent=storage
```
### Spring Bean Context Configuration
This tells Spring which provider implementation class to load during bean initialization.
```
<!-- resources/META-INF/cloudstack/storage-volume-<providername>/spring-storage-volume-<providername>-context.xml -->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context.xsd"
>
<bean id="<providername>DataStoreProvider"
class="org.apache.cloudstack.storage.datastore.provider.<providername>PrimaryDatastoreProviderImpl">
</bean>
</beans>
```
## Build and Deploy the Jar
Once you build the new jar, start the CloudStack Management Server or, if it is a standalone jar, add it to the classpath before starting. Once CloudStack finishes loading all configured modules, the new storage provider will be available under the designated name.

View File

@ -0,0 +1,62 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
<name>Apache CloudStack Plugin - Storage Volume Adaptive Base Provider</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.19.0.0-SNAPSHOT</version>
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-volume</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-snapshot</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-default</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<executions>
<execution>
<phase>integration-test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,157 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
import java.util.Map;
/**
* A simple DataStore adaptive interface. This interface allows the ManagedVolumeDataStoreDriverImpl
* to interact with the external provider without the provider needing to interface with any CloudStack
* objects, factories or database tables, simplifying the implementation and maintenance of the provider
* interface.
*/
public interface ProviderAdapter {
// some common keys across providers. Provider code determines what to do with it
public static final String API_USERNAME_KEY = "api_username";
public static final String API_PASSWORD_KEY = "api_password";
public static final String API_TOKEN_KEY = "api_token";
public static final String API_PRIVATE_KEY = "api_privatekey";
public static final String API_URL_KEY = "api_url";
public static final String API_SKIP_TLS_VALIDATION_KEY = "api_skiptlsvalidation";
// one of: basicauth (default), apitoken, privatekey
public static final String API_AUTHENTICATION_TYPE_KEY = "api_authn_type";
/**
* Refresh the connector with the provided details
* @param details updated configuration properties for the provider connection
*/
public void refresh(Map<String,String> details);
/**
* Validates that the provider is currently connected/configured properly; otherwise throws
* a RuntimeException with information about what is misconfigured
*/
public void validate();
/**
* Forcefully remove/disconnect
*/
public void disconnect();
/**
* Create a new volume on the storage provider
* @param context
* @param volume
* @param diskOffering
* @param sizeInBytes
* @return
*/
public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject volume, ProviderAdapterDiskOffering diskOffering, long sizeInBytes);
/**
* Attach the volume to the target object for the provided context. Returns the scope-specific connection value (for example, the LUN)
* @param context
* @param request
* @return
*/
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Detach the host from the storage context
* @param context
* @param request
*/
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Delete the provided volume/object
* @param context
* @param request
*/
public void delete(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Copy a source object to a destination volume. The source object can be a Volume, Snapshot, or Template
*/
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetVolume);
/**
* Make a device-specific snapshot of the provided volume
*/
public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetSnapshot);
/**
* Revert the snapshot to its base volume. Replaces the base volume with the snapshot point on the storage array
* @param context
* @param request
* @return
*/
public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Resize a volume
* @param context
* @param request
* @param totalNewSizeInBytes
*/
public void resize(ProviderAdapterContext context, ProviderAdapterDataObject request, long totalNewSizeInBytes);
/**
* Return the managed volume info from storage system.
* @param context
* @param request
* @return ProviderVolume object or null if the object was not found but no errors were encountered.
*/
public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Return the managed snapshot info from storage system
* @param context
* @param request
* @return ProviderSnapshot object or null if the object was not found but no errors were encountered.
*/
public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Given an array-specific address, find the matching volume information from the array
* @param addressType
* @param address
* @return
*/
public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, ProviderVolume.AddressType addressType, String address);
/**
* Returns stats about the managed storage where the volumes and snapshots are created/managed
* @return
*/
public ProviderVolumeStorageStats getManagedStorageStats();
/**
* Returns stats about a specific volume
* @return
*/
public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Returns true if the given hostname is accessible to the storage provider.
* @param context
* @param hostname
* @return
*/
public boolean canAccessHost(ProviderAdapterContext context, String hostname);
}
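// Illustrative usage sketch (assumptions, not part of this interface): "AcmeArrayAdapter"
// is a hypothetical implementation shown only to clarify the expected call sequence.
//
// Map<String, String> details = new HashMap<>();
// ProviderAdapter adapter = new AcmeArrayAdapter("https://array.example.com:443", details); // hypothetical class
// adapter.validate(); // throws a RuntimeException if misconfigured
// ProviderAdapterContext context = new ProviderAdapterContext();
// ProviderAdapterDataObject volume = new ProviderAdapterDataObject();
// volume.setType(ProviderAdapterDataObject.Type.VOLUME);
// ProviderVolume created = adapter.create(context, volume, null, 10L * 1024 * 1024 * 1024); // 10 GiB
// String connectionId = adapter.attach(context, volume); // e.g. a LUN id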

View File

@ -0,0 +1,22 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderAdapterConstants {
public static final String EXTERNAL_UUID = "external_uuid";
public static final String EXTERNAL_NAME = "external_name";
}

View File

@ -0,0 +1,83 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderAdapterContext {
private String domainUuid;
private String domainName;
private Long domainId;
private String zoneUuid;
private String zoneName;
private Long zoneId;
private String accountUuid;
private String accountName;
private Long accountId;
public String getDomainUuid() {
return domainUuid;
}
public void setDomainUuid(String domainUuid) {
this.domainUuid = domainUuid;
}
public String getDomainName() {
return domainName;
}
public void setDomainName(String domainName) {
this.domainName = domainName;
}
public Long getDomainId() {
return domainId;
}
public void setDomainId(Long domainId) {
this.domainId = domainId;
}
public String getZoneUuid() {
return zoneUuid;
}
public void setZoneUuid(String zoneUuid) {
this.zoneUuid = zoneUuid;
}
public String getZoneName() {
return zoneName;
}
public void setZoneName(String zoneName) {
this.zoneName = zoneName;
}
public Long getZoneId() {
return zoneId;
}
public void setZoneId(Long zoneId) {
this.zoneId = zoneId;
}
public String getAccountUuid() {
return accountUuid;
}
public void setAccountUuid(String accountUuid) {
this.accountUuid = accountUuid;
}
public String getAccountName() {
return accountName;
}
public void setAccountName(String accountName) {
this.accountName = accountName;
}
public Long getAccountId() {
return accountId;
}
public void setAccountId(Long accountId) {
this.accountId = accountId;
}
}

View File

@ -0,0 +1,159 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
/**
* Represents a translation object for transmitting metadata about a volume,
* snapshot, or template between CloudStack and the storage provider
*/
public class ProviderAdapterDataObject {
public enum Type {
VOLUME(),
SNAPSHOT(),
TEMPLATE(),
ARCHIVE()
}
/**
* The CloudStack UUID of the object
*/
private String uuid;
/**
* The CloudStack name of the object (generated or user-provided)
*/
private String name;
/**
* The type of the object
*/
private Type type;
/**
* The internal local ID of the object (not globally unique)
*/
private Long id;
/**
* The external name assigned on the storage array. It may be dynamically
* generated or derived from CloudStack data
*/
private String externalName;
/**
* The external UUID of the object on the storage array. This may be different
* from or the same as the CloudStack UUID, depending on the implementation.
*/
private String externalUuid;
/**
* The internal (non-global) ID of the datastore this object is defined in
*/
private Long dataStoreId;
/**
* The global ID of the datastore this object is defined in
*/
private String dataStoreUuid;
/**
* The name of the data store this object is defined in
*/
private String dataStoreName;
/**
* Represents the device connection id, typically a LUN, used to find the volume in conjunction with Address and AddressType.
*/
private String externalConnectionId;
public String getUuid() {
return uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
public String getExternalName() {
return externalName;
}
public void setExternalName(String externalName) {
this.externalName = externalName;
}
public String getExternalUuid() {
return externalUuid;
}
public void setExternalUuid(String externalUuid) {
this.externalUuid = externalUuid;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Long getDataStoreId() {
return dataStoreId;
}
public void setDataStoreId(Long dataStoreId) {
this.dataStoreId = dataStoreId;
}
public String getDataStoreUuid() {
return dataStoreUuid;
}
public void setDataStoreUuid(String dataStoreUuid) {
this.dataStoreUuid = dataStoreUuid;
}
public String getDataStoreName() {
return dataStoreName;
}
public void setDataStoreName(String dataStoreName) {
this.dataStoreName = dataStoreName;
}
public String getExternalConnectionId() {
return externalConnectionId;
}
public void setExternalConnectionId(String externalConnectionId) {
this.externalConnectionId = externalConnectionId;
}
}
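// Illustrative example (ids and names are assumptions): a driver translating a CloudStack
// volume with database id 101 in datastore 5 would typically populate:
//
// ProviderAdapterDataObject obj = new ProviderAdapterDataObject();
// obj.setType(ProviderAdapterDataObject.Type.VOLUME);
// obj.setId(101L); // internal, non-global id
// obj.setDataStoreId(5L);
// obj.setExternalName("vol-5-2-17-101"); // provider-side name, see ProviderVolumeNamer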

View File

@ -0,0 +1,194 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
import java.util.Date;
import org.apache.commons.lang.NotImplementedException;
import com.cloud.offering.DiskOffering;
/**
* Wrapper disk offering that masks the CloudStack-dependent classes from the storage provider code
*/
public class ProviderAdapterDiskOffering {
private ProvisioningType type;
private DiskCacheMode diskCacheMode;
private DiskOffering hiddenDiskOffering;
private State state;
public ProviderAdapterDiskOffering(DiskOffering hiddenDiskOffering) {
this.hiddenDiskOffering = hiddenDiskOffering;
if (hiddenDiskOffering.getProvisioningType() != null) {
this.type = ProvisioningType.getProvisioningType(hiddenDiskOffering.getProvisioningType().toString());
}
if (hiddenDiskOffering.getCacheMode() != null) {
this.diskCacheMode = DiskCacheMode.getDiskCacheMode(hiddenDiskOffering.getCacheMode().toString());
}
if (hiddenDiskOffering.getState() != null) {
this.state = State.valueOf(hiddenDiskOffering.getState().toString());
}
}
public Long getBytesReadRate() {
return hiddenDiskOffering.getBytesReadRate();
}
public Long getBytesReadRateMax() {
return hiddenDiskOffering.getBytesReadRateMax();
}
public Long getBytesReadRateMaxLength() {
return hiddenDiskOffering.getBytesReadRateMaxLength();
}
public Long getBytesWriteRate() {
return hiddenDiskOffering.getBytesWriteRate();
}
public Long getBytesWriteRateMax() {
return hiddenDiskOffering.getBytesWriteRateMax();
}
public Long getBytesWriteRateMaxLength() {
return hiddenDiskOffering.getBytesWriteRateMaxLength();
}
public DiskCacheMode getCacheMode() {
return diskCacheMode;
}
public Date getCreated() {
return hiddenDiskOffering.getCreated();
}
public long getDiskSize() {
return hiddenDiskOffering.getDiskSize();
}
public boolean getDiskSizeStrictness() {
return hiddenDiskOffering.getDiskSizeStrictness();
}
public String getDisplayText() {
return hiddenDiskOffering.getDisplayText();
}
public boolean getEncrypt() {
return hiddenDiskOffering.getEncrypt();
}
public Integer getHypervisorSnapshotReserve() {
return hiddenDiskOffering.getHypervisorSnapshotReserve();
}
public long getId() {
return hiddenDiskOffering.getId();
}
public Long getIopsReadRate() {
return hiddenDiskOffering.getIopsReadRate();
}
public Long getIopsReadRateMax() {
return hiddenDiskOffering.getIopsReadRateMax();
}
public Long getIopsReadRateMaxLength() {
return hiddenDiskOffering.getIopsReadRateMaxLength();
}
public Long getIopsWriteRate() {
return hiddenDiskOffering.getIopsWriteRate();
}
public Long getIopsWriteRateMax() {
return hiddenDiskOffering.getIopsWriteRateMax();
}
public Long getIopsWriteRateMaxLength() {
return hiddenDiskOffering.getIopsWriteRateMaxLength();
}
public Long getMaxIops() {
return hiddenDiskOffering.getMaxIops();
}
public Long getMinIops() {
return hiddenDiskOffering.getMinIops();
}
public String getName() {
return hiddenDiskOffering.getName();
}
public State getState() {
return state;
}
public String getTags() {
return hiddenDiskOffering.getTags();
}
public String[] getTagsArray() {
return hiddenDiskOffering.getTagsArray();
}
public String getUniqueName() {
return hiddenDiskOffering.getUniqueName();
}
public String getUuid() {
return hiddenDiskOffering.getUuid();
}
public ProvisioningType getType() {
return type;
}
public void setType(ProvisioningType type) {
this.type = type;
}
public static enum ProvisioningType {
THIN("thin"),
SPARSE("sparse"),
FAT("fat");
private final String provisionType;
private ProvisioningType(String provisionType){
this.provisionType = provisionType;
}
public String toString(){
return this.provisionType;
}
public static ProvisioningType getProvisioningType(String provisioningType){
if(provisioningType.equals(THIN.provisionType)){
return ProvisioningType.THIN;
} else if(provisioningType.equals(SPARSE.provisionType)){
return ProvisioningType.SPARSE;
} else if (provisioningType.equals(FAT.provisionType)){
return ProvisioningType.FAT;
} else {
throw new NotImplementedException("Invalid provisioning type specified: " + provisioningType);
}
}
}
enum State {
Inactive, Active,
}
enum DiskCacheMode {
NONE("none"), WRITEBACK("writeback"), WRITETHROUGH("writethrough");
private final String _diskCacheMode;
DiskCacheMode(String cacheMode) {
_diskCacheMode = cacheMode;
}
@Override
public String toString() {
return _diskCacheMode;
}
public static DiskCacheMode getDiskCacheMode(String cacheMode) {
if (cacheMode.equals(NONE._diskCacheMode)) {
return NONE;
} else if (cacheMode.equals(WRITEBACK._diskCacheMode)) {
return WRITEBACK;
} else if (cacheMode.equals(WRITETHROUGH._diskCacheMode)) {
return WRITETHROUGH;
} else {
throw new NotImplementedException("Invalid cache mode specified: " + cacheMode);
}
}
};
}

View File

@ -0,0 +1,24 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
import java.util.Map;
public interface ProviderAdapterFactory {
public String getProviderName();
public ProviderAdapter create(String url, Map<String, String> details);
}
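// Illustrative factory sketch for a hypothetical provider named "acme" (an assumption,
// not part of this commit):
//
// public class AcmeArrayAdapterFactory implements ProviderAdapterFactory {
//     @Override
//     public String getProviderName() {
//         return "acme";
//     }
//     @Override
//     public ProviderAdapter create(String url, Map<String, String> details) {
//         return new AcmeArrayAdapter(url, details); // hypothetical ProviderAdapter implementation
//     }
// }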

View File

@ -0,0 +1,28 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public interface ProviderSnapshot extends ProviderVolume {
/**
* Returns true if the provider supports directly attaching the snapshot.
* If false is returned, it indicates that CloudStack needs to perform
* a temporary volume copy prior to copying the snapshot to a new
* volume on another provider
* @return
*/
public Boolean canAttachDirectly();
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public interface ProviderVolume {
public Boolean isDestroyed();
public String getId();
public void setId(String id);
public String getName();
public void setName(String name);
public Integer getPriority();
public void setPriority(Integer priority);
public String getState();
public AddressType getAddressType();
public void setAddressType(AddressType addressType);
public String getAddress();
public Long getAllocatedSizeInBytes();
public Long getUsedBytes();
public String getExternalUuid();
public String getExternalName();
public String getExternalConnectionId();
public enum AddressType {
FIBERWWN
}
}

View File

@ -0,0 +1,58 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderVolumeNamer {
private static final String SNAPSHOT_PREFIX = "snap";
private static final String VOLUME_PREFIX = "vol";
private static final String TEMPLATE_PREFIX = "tpl";
/** Simple property to allow sharing a storage setup, primarily in lab/testing environments */
private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier");
public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) {
ProviderAdapterDataObject.Type objType = obj.getType();
String prefix = null;
if (objType == ProviderAdapterDataObject.Type.SNAPSHOT) {
prefix = SNAPSHOT_PREFIX;
} else if (objType == ProviderAdapterDataObject.Type.VOLUME) {
prefix = VOLUME_PREFIX;
} else if (objType == ProviderAdapterDataObject.Type.TEMPLATE) {
prefix = TEMPLATE_PREFIX;
} else {
throw new RuntimeException("Unknown ManagedDataObject type provided: " + obj.getType());
}
if (ENV_PREFIX != null) {
prefix = ENV_PREFIX + "-" + prefix;
}
return prefix + "-" + obj.getDataStoreId() + "-" + context.getDomainId() + "-" + context.getAccountId() + "-" + obj.getId();
}
public static String generateObjectComment(ProviderAdapterContext context, ProviderAdapterDataObject obj) {
return "CSInfo [Account=" + context.getAccountName()
+ "; Domain=" + context.getDomainName()
+ "; DomainUUID=" + context.getDomainUuid()
+ "; Account=" + context.getAccountName()
+ "; AccountUUID=" + context.getAccountUuid()
+ "; ObjectEndUserName=" + obj.getName()
+ "; ObjectUUID=" + obj.getUuid() + "]";
}
}
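// Worked example (assumed ids for illustration): for a VOLUME with object id 101 in
// datastore 5, owned by domain id 2 and account id 17, generateObjectName() returns
// "vol-5-2-17-101"; with -Dadaptive.storage.provider.envIdentifier=lab1 set, it
// returns "lab1-vol-5-2-17-101".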

View File

@ -0,0 +1,55 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderVolumeStats {
private Long allocatedInBytes;
private Long virtualUsedInBytes;
private Long actualUsedInBytes;
private Long iops;
private Long throughput;
public Long getAllocatedInBytes() {
return allocatedInBytes;
}
public void setAllocatedInBytes(Long allocatedInBytes) {
this.allocatedInBytes = allocatedInBytes;
}
public Long getVirtualUsedInBytes() {
return virtualUsedInBytes;
}
public void setVirtualUsedInBytes(Long virtualUsedInBytes) {
this.virtualUsedInBytes = virtualUsedInBytes;
}
public Long getActualUsedInBytes() {
return actualUsedInBytes;
}
public void setActualUsedInBytes(Long actualUsedInBytes) {
this.actualUsedInBytes = actualUsedInBytes;
}
public Long getIops() {
return iops;
}
public void setIops(Long iops) {
this.iops = iops;
}
public Long getThroughput() {
return throughput;
}
public void setThroughput(Long throughput) {
this.throughput = throughput;
}
}

View File

@ -0,0 +1,71 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderVolumeStorageStats {
/**
* Total physical capacity in bytes of the storage system within the scope of the given API configuration
*/
private long capacityInBytes;
/**
* Virtual amount of bytes allocated for use. Typically what the users of the volume think they have before
* any compression, deduplication, or thin-provisioning semantics are accounted for.
*/
private Long virtualUsedInBytes;
/**
* Actual physical bytes used on the storage system within the scope of the given API configuration
*/
private Long actualUsedInBytes;
/**
* Current IOPS
*/
private Long iops;
/**
* Current raw throughput
*/
private Long throughput;
public Long getVirtualUsedInBytes() {
return virtualUsedInBytes;
}
public void setVirtualUsedInBytes(Long virtualUsedInBytes) {
this.virtualUsedInBytes = virtualUsedInBytes;
}
public Long getActualUsedInBytes() {
return actualUsedInBytes;
}
public void setActualUsedInBytes(Long actualUsedInBytes) {
this.actualUsedInBytes = actualUsedInBytes;
}
public Long getIops() {
return iops;
}
public void setIops(Long iops) {
this.iops = iops;
}
public Long getThroughput() {
return throughput;
}
public void setThroughput(Long throughput) {
this.throughput = throughput;
}
public Long getCapacityInBytes() {
return capacityInBytes;
}
public void setCapacityInBytes(Long capacityInBytes) {
this.capacityInBytes = capacityInBytes;
}
}

View File

@ -0,0 +1,901 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.driver;
import java.util.Map;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import java.util.HashMap;
import java.util.List;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterConstants;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering;
import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.domain.DomainVO;
import com.cloud.domain.dao.DomainDao;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.projects.dao.ProjectDao;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.user.AccountManager;
import com.cloud.user.AccountVO;
import com.cloud.user.dao.AccountDao;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDriverImpl {
static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreDriverImpl.class);
private String providerName = null;
@Inject
AccountManager _accountMgr;
@Inject
DiskOfferingDao _diskOfferingDao;
@Inject
VolumeDao _volumeDao;
@Inject
PrimaryDataStoreDao _storagePoolDao;
@Inject
ProjectDao _projectDao;
@Inject
SnapshotDataStoreDao _snapshotDataStoreDao;
@Inject
SnapshotDetailsDao _snapshotDetailsDao;
@Inject
VolumeDetailsDao _volumeDetailsDao;
@Inject
VMTemplatePoolDao _vmTemplatePoolDao;
@Inject
AccountDao _accountDao;
@Inject
StoragePoolDetailsDao _storagePoolDetailsDao;
@Inject
SnapshotDao _snapshotDao;
@Inject
VMTemplateDao _vmTemplateDao;
@Inject
DataCenterDao _datacenterDao;
@Inject
DomainDao _domainDao;
@Inject
VolumeService _volumeService;
private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null;
public AdaptiveDataStoreDriverImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
this._adapterFactoryMap = factoryMap;
}
@Override
public DataTO getTO(DataObject data) {
return null;
}
@Override
public DataStoreTO getStoreTO(DataStore store) {
return null;
}
public ProviderAdapter getAPI(StoragePool pool, Map<String, String> details) {
return _adapterFactoryMap.getAPI(pool.getUuid(), pool.getStorageProviderName(), details);
}
@Override
public void createAsync(DataStore dataStore, DataObject dataObject,
AsyncCompletionCallback<CreateCmdResult> callback) {
CreateCmdResult result = null;
try {
s_logger.info("Volume creation starting for data store [" + dataStore.getName() +
"] and data object [" + dataObject.getUuid() + "] of type [" + dataObject.getType() + "]");
// quota size of the volume will be increased with the given
// HypervisorSnapshotReserve
Long volumeSizeBytes = dataObject.getSize();
// CloudStack talks bytes; some providers (e.g. Primera) talk MiB
StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderAdapterContext context = newManagedVolumeContext(dataObject);
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
ProviderAdapterDiskOffering inDiskOffering = null;
// only get the offering if it's a volume type. If it's a template type we skip this.
if (DataObjectType.VOLUME.equals(dataObject.getType())) {
// get the disk offering as provider may need to see details of this to
// provision the correct type of volume
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeVO.getDiskOfferingId());
if (diskOffering.isUseLocalStorage()) {
throw new CloudRuntimeException(
"Disk offering requires local storage but this storage provider does not suppport local storage. Please contact the cloud adminstrator to have the disk offering configuration updated to avoid this conflict.");
}
inDiskOffering = new ProviderAdapterDiskOffering(diskOffering);
}
// if it's a template and it already exists, just return the info -- may mean a previous attempt to
// copy this template failed after volume creation and its state has not advanced yet.
ProviderVolume volume = null;
if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
volume = api.getVolume(context, dataIn);
if (volume != null) {
s_logger.info("Template volume already exists [" + dataObject.getUuid() + "]");
}
}
// create the volume if it didn't already exist
if (volume == null) {
// clunky - if this fails AND this detail property is set, it means upstream may have already created it
// in VolumeService and DataMotionStrategy tries to do it again before copying...
try {
volume = api.create(context, dataIn, inDiskOffering, volumeSizeBytes);
} catch (Exception e) {
VolumeDetailVO csId = _volumeDetailsDao.findDetail(dataObject.getId(), "cloneOfTemplate");
if (csId != null && csId.getId() > 0) {
volume = api.getVolume(context, dataIn);
} else {
throw e;
}
}
s_logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]");
}
// set these from the discovered or created volume before proceeding
dataIn.setExternalName(volume.getExternalName());
dataIn.setExternalUuid(volume.getExternalUuid());
// add the volume to the host set
String connectionId = api.attach(context, dataIn);
// update the cloudstack metadata about the volume
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId);
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
result.setSuccess(true);
s_logger.info("Volume creation complete for [" + dataObject.getUuid() + "]");
} catch (Throwable e) {
s_logger.error("Volume creation failed for dataObject [" + dataObject.getUuid() + "]: " + e.toString(), e);
result = new CreateCmdResult(null, new Answer(null));
result.setResult(e.toString());
result.setSuccess(false);
throw new CloudRuntimeException(e.getMessage());
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void deleteAsync(DataStore dataStore, DataObject dataObject,
AsyncCompletionCallback<CommandResult> callback) {
s_logger.debug("Delete volume started");
CommandResult result = new CommandResult();
try {
StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderAdapterContext context = newManagedVolumeContext(dataObject);
ProviderAdapterDataObject inData = newManagedDataObject(dataObject, storagePool);
// skip adapter delete if neither external identifier is set. This probably means the volume
// create failed before they could be set
if (!(inData.getExternalName() == null && inData.getExternalUuid() == null)) {
api.delete(context, inData);
}
result.setResult("Successfully deleted volume");
result.setSuccess(true);
} catch (Throwable e) {
s_logger.error("Result to volume delete failed with exception", e);
result.setResult(e.toString());
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void copyAsync(DataObject srcdata, DataObject destdata,
AsyncCompletionCallback<CopyCommandResult> callback) {
CopyCommandResult result = null;
try {
s_logger.info("Copying volume " + srcdata.getUuid() + " to " + destdata.getUuid() + "]");
if (!canCopy(srcdata, destdata)) {
throw new CloudRuntimeException(
"The data store provider is unable to perform copy operations because the source or destination object is not the correct type of volume");
}
try {
StoragePoolVO storagePool = _storagePoolDao.findById(srcdata.getDataStore().getId());
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
s_logger.info("Copy volume " + srcdata.getUuid() + " to " + destdata.getUuid());
ProviderVolume outVolume;
ProviderAdapterContext context = newManagedVolumeContext(destdata);
ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool);
ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool);
outVolume = api.copy(context, sourceIn, destIn);
// populate this data - it may be needed later
destIn.setExternalName(outVolume.getExternalName());
destIn.setExternalConnectionId(outVolume.getExternalConnectionId());
destIn.setExternalUuid(outVolume.getExternalUuid());
// if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size.
// we won't, however, shrink a volume if it's smaller.
if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) {
s_logger.info("Resizing volume " + destdata.getUuid() + " to requested target volume size of " + destdata.getSize());
api.resize(context, destIn, destdata.getSize());
}
String connectionId = api.attach(context, destIn);
String finalPath;
// format: type=fiberwwn; address=<address>; connid=<connid>
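// e.g. "type=FIBERWWN; address=60002ac0000000000000000f00019fe5; connid=12" (illustrative WWN and connection id)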
if (connectionId != null) {
finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId);
} else {
finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase());
}
persistVolumeData(storagePool, details, destdata, outVolume, connectionId);
s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
VolumeObjectTO voto = new VolumeObjectTO();
voto.setPath(finalPath);
result = new CopyCommandResult(finalPath, new CopyCmdAnswer(voto));
result.setSuccess(true);
} catch (Throwable e) {
s_logger.error("Result to volume copy failed with exception", e);
result = new CopyCommandResult(null, null);
result.setSuccess(false);
result.setResult(e.toString());
}
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
AsyncCompletionCallback<CopyCommandResult> callback) {
copyAsync(srcData, destData, callback);
}
@Override
public boolean canCopy(DataObject srcData, DataObject destData) {
s_logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":"
+ srcData.getDataStore().getId() + "] AND destData ["
+ destData.getUuid() + ":" + destData.getType() + ":" + destData.getDataStore().getId() + "]");
try {
if (!isSameProvider(srcData)) {
s_logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!");
return false;
}
if (!isSameProvider(destData)) {
s_logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!");
return false;
}
s_logger.debug(
"canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists");
StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId());
Map<String, String> details = _storagePoolDao.getDetails(srcData.getDataStore().getId());
ProviderAdapter api = getAPI(poolVO, details);
/**
* The storage provider generates its own names for snapshots which we store and
* retrieve when needed
*/
ProviderAdapterContext context = newManagedVolumeContext(srcData);
ProviderAdapterDataObject srcDataObject = newManagedDataObject(srcData, poolVO);
if (srcData instanceof SnapshotObject) {
return api.getSnapshot(context, srcDataObject) != null;
} else {
return api.getVolume(context, srcDataObject) != null;
}
} catch (Throwable e) {
s_logger.warn("Problem checking if we canCopy", e);
return false;
}
}
@Override
public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
s_logger.debug("Resize volume started");
CreateCmdResult result = null;
try {
VolumeObject vol = (VolumeObject) data;
StoragePool pool = (StoragePool) data.getDataStore();
ResizeVolumePayload resizeParameter = (ResizeVolumePayload) vol.getpayload();
StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
if (!(poolVO.isManaged())) {
super.resize(data, callback);
return;
}
try {
Map<String, String> details = _storagePoolDao.getDetails(pool.getId());
ProviderAdapter api = getAPI(pool, details);
// shrinking is not supported (truncation may be possible but would require
// separate API calls to investigate)
if (vol.getSize() > resizeParameter.newSize) {
throw new CloudRuntimeException("Storage provider does not support shrinking an existing volume");
}
ProviderAdapterContext context = newManagedVolumeContext(data);
ProviderAdapterDataObject dataIn = newManagedDataObject(data, poolVO);
if (s_logger.isDebugEnabled()) s_logger.debug("Calling provider API to resize volume " + data.getUuid() + " to " + resizeParameter.newSize);
api.resize(context, dataIn, resizeParameter.newSize);
if (vol.isAttachedVM()) {
if (VirtualMachine.State.Running.equals(vol.getAttachedVM().getState())) {
if (s_logger.isDebugEnabled()) s_logger.debug("Notify currently attached VM of volume resize for " + data.getUuid() + " to " + resizeParameter.newSize);
_volumeService.resizeVolumeOnHypervisor(vol.getId(), resizeParameter.newSize, vol.getAttachedVM().getHostId(), vol.getAttachedVM().getInstanceName());
}
}
result = new CreateCmdResult(data.getUuid(), new Answer(null));
result.setSuccess(true);
} catch (Throwable e) {
s_logger.error("Resize volume failed, please contact cloud support.", e);
result = new CreateCmdResult(null, new Answer(null));
result.setResult(e.toString());
result.setSuccess(false);
}
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo,
QualityOfServiceState qualityOfServiceState) {
s_logger.info("handleQualityOfServiceVolumeMigration: " + volumeInfo.getUuid() + " " +
volumeInfo.getPath() + ": " + qualityOfServiceState.toString());
}
@Override
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
VolumeInfo volume = (VolumeInfo) dataObject;
long volumeSize = volume.getSize();
Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
if (hypervisorSnapshotReserve != null) {
if (hypervisorSnapshotReserve < 25) {
hypervisorSnapshotReserve = 25;
}
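// worked example (illustrative): a 100 GiB volume with the minimum 25% reserve is reported as 125 GiB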
volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f);
}
return volumeSize;
}
@Override
public ChapInfo getChapInfo(DataObject dataObject) {
return null;
}
@Override
public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {
CreateCmdResult result = null;
try {
s_logger.debug("taking volume snapshot");
SnapshotObjectTO snapshotTO = (SnapshotObjectTO) snapshot.getTO();
VolumeInfo baseVolume = snapshot.getBaseVolume();
DataStore ds = baseVolume.getDataStore();
StoragePoolVO storagePool = _storagePoolDao.findById(ds.getId());
Map<String, String> details = _storagePoolDao.getDetails(ds.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderAdapterContext context = newManagedVolumeContext(snapshot);
ProviderAdapterDataObject inVolumeDO = newManagedDataObject(baseVolume, storagePool);
ProviderAdapterDataObject inSnapshotDO = newManagedDataObject(snapshot, storagePool);
ProviderSnapshot outSnapshot = api.snapshot(context, inVolumeDO, inSnapshotDO);
// add the snapshot to the host group (needed for copying to non-provider storage
// to create templates, etc)
String connectionId = null;
String finalAddress = outSnapshot.getAddress();
if (outSnapshot.canAttachDirectly()) {
connectionId = api.attach(context, inSnapshotDO);
if (connectionId != null) {
finalAddress = finalAddress + "::" + connectionId;
}
}
snapshotTO.setPath(finalAddress);
snapshotTO.setName(outSnapshot.getName());
snapshotTO.setHypervisorType(HypervisorType.KVM);
// unclear why this is needed in addition to snapshotTO.setPath, but without it the path on
// the target snapshot object isn't set, so a volume created from it also has no path
// and can't be attached to a VM
SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshot.getId(),
DiskTO.PATH, finalAddress, true);
_snapshotDetailsDao.persist(snapshotDetail);
// save the name (reuse on revert)
snapshotDetail = new SnapshotDetailsVO(snapshot.getId(),
ProviderAdapterConstants.EXTERNAL_NAME, outSnapshot.getExternalName(), true);
_snapshotDetailsDao.persist(snapshotDetail);
// save the uuid (reuse on revert)
snapshotDetail = new SnapshotDetailsVO(snapshot.getId(),
ProviderAdapterConstants.EXTERNAL_UUID, outSnapshot.getExternalUuid(), true);
_snapshotDetailsDao.persist(snapshotDetail);
result = new CreateCmdResult(finalAddress, new CreateObjectAnswer(snapshotTO));
result.setResult("Snapshot completed with new WWN " + finalAddress);
result.setSuccess(true);
} catch (Throwable e) {
s_logger.debug("Failed to take snapshot: " + e.getMessage());
result = new CreateCmdResult(null, null);
result.setResult(e.toString());
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore,
AsyncCompletionCallback<CommandResult> callback) {
CommandResult result = new CommandResult();
ProviderAdapter api = null;
try {
DataStore ds = snapshotOnPrimaryStore.getDataStore();
StoragePoolVO storagePool = _storagePoolDao.findById(ds.getId());
Map<String, String> details = _storagePoolDao.getDetails(ds.getId());
api = getAPI(storagePool, details);
String externalName = null;
String externalUuid = null;
List<SnapshotDetailsVO> list = _snapshotDetailsDao.findDetails(snapshot.getId(),
ProviderAdapterConstants.EXTERNAL_NAME);
if (list != null && list.size() > 0) {
externalName = list.get(0).getValue();
}
list = _snapshotDetailsDao.findDetails(snapshot.getId(), ProviderAdapterConstants.EXTERNAL_UUID);
if (list != null && list.size() > 0) {
externalUuid = list.get(0).getValue();
}
ProviderAdapterContext context = newManagedVolumeContext(snapshot);
ProviderAdapterDataObject inSnapshotDO = newManagedDataObject(snapshot, storagePool);
inSnapshotDO.setExternalName(externalName);
inSnapshotDO.setExternalUuid(externalUuid);
// perform promote (async, wait for job to finish)
api.revert(context, inSnapshotDO);
// set command as success
result.setSuccess(true);
} catch (Throwable e) {
s_logger.warn("revertSnapshot failed", e);
result.setResult(e.toString());
result.setSuccess(false);
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public long getUsedBytes(StoragePool storagePool) {
long usedSpaceBytes = 0;
// Volumes
List<VolumeVO> volumes = _volumeDao.findByPoolIdAndState(storagePool.getId(), Volume.State.Ready);
if (volumes != null) {
for (VolumeVO volume : volumes) {
usedSpaceBytes += volume.getSize();
long vmSnapshotChainSize = volume.getVmSnapshotChainSize() == null ? 0
: volume.getVmSnapshotChainSize();
usedSpaceBytes += vmSnapshotChainSize;
}
}
// Snapshots
List<SnapshotDataStoreVO> snapshots = _snapshotDataStoreDao.listByStoreIdAndState(storagePool.getId(),
ObjectInDataStoreStateMachine.State.Ready);
if (snapshots != null) {
for (SnapshotDataStoreVO snapshot : snapshots) {
usedSpaceBytes += snapshot.getSize();
}
}
// Templates
List<VMTemplateStoragePoolVO> templates = _vmTemplatePoolDao.listByPoolIdAndState(storagePool.getId(),
ObjectInDataStoreStateMachine.State.Ready);
if (templates != null) {
for (VMTemplateStoragePoolVO template : templates) {
usedSpaceBytes += template.getTemplateSize();
}
}
s_logger.debug("Used/Allocated storage space (in bytes): " + String.valueOf(usedSpaceBytes));
return usedSpaceBytes;
}
@Override
public long getUsedIops(StoragePool storagePool) {
return super.getUsedIops(storagePool);
}
@Override
public Map<String, String> getCapabilities() {
Map<String, String> mapCapabilities = new HashMap<String, String>();
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
// indicates the datastore can create temporary volumes for use when copying
// data from a snapshot
mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString());
return mapCapabilities;
}
@Override
public boolean canProvideStorageStats() {
return true;
}
@Override
public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
String capacityBytesStr = details.get("capacityBytes");
Long capacityBytes = null;
if (capacityBytesStr == null) {
ProviderAdapter api = getAPI(storagePool, details);
ProviderVolumeStorageStats stats = api.getManagedStorageStats();
if (stats == null) {
return null;
}
capacityBytes = stats.getCapacityInBytes();
} else {
capacityBytes = Long.parseLong(capacityBytesStr);
}
Long usedBytes = this.getUsedBytes(storagePool);
return new Pair<Long, Long>(capacityBytes, usedBytes);
}
@Override
public boolean canProvideVolumeStats() {
return true;
}
public String getProviderName() {
return providerName;
}
public void setProviderName(String providerName) {
this.providerName = providerName;
}
@Override
public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumePath) {
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderVolume.AddressType addressType = null;
if (volumePath.indexOf(";") > 1) {
String[] fields = volumePath.split(";");
if (fields.length > 0) {
for (String field: fields) {
if (field.trim().startsWith("address=")) {
String[] toks = field.split("=");
if (toks.length > 1) {
volumePath = toks[1];
}
} else if (field.trim().startsWith("type=")) {
String[] toks = field.split("=");
if (toks.length > 1) {
addressType = ProviderVolume.AddressType.valueOf(toks[1]);
}
}
}
}
} else {
addressType = ProviderVolume.AddressType.FIBERWWN;
}
// limited context since this is not at an account level
ProviderAdapterContext context = new ProviderAdapterContext();
context.setZoneId(storagePool.getDataCenterId());
ProviderVolume volume = api.getVolumeByAddress(context, addressType, volumePath);
if (volume == null) {
return null;
}
ProviderAdapterDataObject object = new ProviderAdapterDataObject();
object.setExternalUuid(volume.getExternalUuid());
object.setExternalName(volume.getExternalName());
object.setType(ProviderAdapterDataObject.Type.VOLUME);
ProviderVolumeStats stats = api.getVolumeStats(context, object);
Long provisionedSizeInBytes = stats.getActualUsedInBytes();
Long allocatedSizeInBytes = stats.getAllocatedInBytes();
if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) {
return null;
}
return new Pair<Long, Long>(provisionedSizeInBytes, allocatedSizeInBytes);
}
@Override
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
Map<String, String> details = _storagePoolDao.getDetails(pool.getId());
ProviderAdapter api = getAPI(pool, details);
ProviderAdapterContext context = new ProviderAdapterContext();
context.setZoneId(host.getDataCenterId());
return api.canAccessHost(context, host.getName());
}
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
DataObject dataObject, ProviderVolume volume, String connectionId) {
if (dataObject.getType() == DataObjectType.VOLUME) {
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
}
}
void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume managedVolume, String connectionId) {
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
// if it's null, check if the storage provider returned one that is already set
if (connectionId == null) {
connectionId = managedVolume.getExternalConnectionId();
}
String finalPath;
// format: type=fiberwwn; address=<address>; connid=<connid>
if (connectionId != null) {
finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId);
} else {
finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase());
}
volumeVO.setPath(finalPath);
volumeVO.setFormat(ImageFormat.RAW);
volumeVO.setPoolId(storagePool.getId());
volumeVO.setExternalUuid(managedVolume.getExternalUuid());
volumeVO.setDisplay(true);
volumeVO.setDisplayVolume(true);
_volumeDao.update(volumeVO.getId(), volumeVO);
volumeVO = _volumeDao.findById(volumeVO.getId());
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
DiskTO.PATH, finalPath, true);
_volumeDetailsDao.persist(volumeDetailVO);
volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true);
_volumeDetailsDao.persist(volumeDetailVO);
volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true);
_volumeDetailsDao.persist(volumeDetailVO);
}
void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume volume, String connectionId) {
TemplateInfo templateInfo = (TemplateInfo) dataObject;
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
templateInfo.getId(), null);
// template pool ref doesn't have a details object so we'll save:
// 1. external name ==> installPath
// 2. address ==> local download path
if (connectionId == null) {
templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(),
volume.getAddress().toLowerCase()));
} else {
templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(),
volume.getAddress().toLowerCase(), connectionId));
}
templatePoolRef.setLocalDownloadPath(volume.getExternalName());
templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
}
ProviderAdapterContext newManagedVolumeContext(DataObject obj) {
ProviderAdapterContext ctx = new ProviderAdapterContext();
if (obj instanceof VolumeInfo) {
VolumeVO vol = _volumeDao.findById(obj.getId());
ctx.setAccountId(vol.getAccountId());
ctx.setDomainId(vol.getDomainId());
} else if (obj instanceof SnapshotInfo) {
SnapshotVO snap = _snapshotDao.findById(obj.getId());
ctx.setAccountId(snap.getAccountId());
ctx.setDomainId(snap.getDomainId());
} else if (obj instanceof TemplateInfo) {
VMTemplateVO template = _vmTemplateDao.findById(obj.getId());
ctx.setAccountId(template.getAccountId());
// templates don't have a domain ID so always set to 0
ctx.setDomainId(0L);
}
if (ctx.getAccountId() != null) {
AccountVO acct = _accountDao.findById(ctx.getAccountId());
if (acct != null) {
ctx.setAccountUuid(acct.getUuid());
ctx.setAccountName(acct.getName());
}
}
if (ctx.getDomainId() != null) {
DomainVO domain = _domainDao.findById(ctx.getDomainId());
if (domain != null) {
ctx.setDomainUuid(domain.getUuid());
ctx.setDomainName(domain.getName());
}
}
return ctx;
}
boolean isSameProvider(DataObject obj) {
StoragePoolVO storagePool = this._storagePoolDao.findById(obj.getDataStore().getId());
return storagePool != null && storagePool.getStorageProviderName().equals(this.getProviderName());
}
ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool storagePool) {
ProviderAdapterDataObject dataIn = new ProviderAdapterDataObject();
if (data instanceof VolumeInfo) {
List<VolumeDetailVO> list = _volumeDetailsDao.findDetails(data.getId(),
ProviderAdapterConstants.EXTERNAL_NAME);
String externalName = null;
if (list != null && list.size() > 0) {
externalName = list.get(0).getValue();
}
list = _volumeDetailsDao.findDetails(data.getId(), ProviderAdapterConstants.EXTERNAL_UUID);
String externalUuid = null;
if (list != null && list.size() > 0) {
externalUuid = list.get(0).getValue();
}
dataIn.setName(((VolumeInfo) data).getName());
dataIn.setExternalName(externalName);
dataIn.setExternalUuid(externalUuid);
} else if (data instanceof SnapshotInfo) {
List<SnapshotDetailsVO> list = _snapshotDetailsDao.findDetails(data.getId(),
ProviderAdapterConstants.EXTERNAL_NAME);
String externalName = null;
if (list != null && list.size() > 0) {
externalName = list.get(0).getValue();
}
list = _snapshotDetailsDao.findDetails(data.getId(), ProviderAdapterConstants.EXTERNAL_UUID);
String externalUuid = null;
if (list != null && list.size() > 0) {
externalUuid = list.get(0).getValue();
}
dataIn.setName(((SnapshotInfo) data).getName());
dataIn.setExternalName(externalName);
dataIn.setExternalUuid(externalUuid);
} else if (data instanceof TemplateInfo) {
TemplateInfo ti = (TemplateInfo)data;
dataIn.setName(ti.getName());
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), ti.getId(), null);
dataIn.setExternalName(templatePoolRef.getLocalDownloadPath());
}
dataIn.setId(data.getId());
dataIn.setDataStoreId(data.getDataStore().getId());
dataIn.setDataStoreUuid(data.getDataStore().getUuid());
dataIn.setDataStoreName(data.getDataStore().getName());
dataIn.setUuid(data.getUuid());
dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString()));
return dataIn;
}
}

View File

@ -0,0 +1,407 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.io.UnsupportedEncodingException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.host.Host;
/**
* Manages the lifecycle of a Managed Data Store in CloudStack
*/
public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
@Inject
private PrimaryDataStoreDao _storagePoolDao;
private static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreLifeCycleImpl.class);
@Inject
PrimaryDataStoreHelper _dataStoreHelper;
@Inject
protected ResourceManager _resourceMgr;
@Inject
private StoragePoolAutomation _storagePoolAutomation;
@Inject
private PrimaryDataStoreDao _primaryDataStoreDao;
@Inject
private StorageManager _storageMgr;
@Inject
private ClusterDao _clusterDao;
AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap;
public AdaptiveDataStoreLifeCycleImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
_adapterFactoryMap = factoryMap;
}
/**
* Initialize the storage pool. The expected URL format is:
* https://hostname:port?cpg=<cpgname>&snapcpg=<snapcpg>&hostset=<hostsetname>&disabletlsvalidation=true
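* Example (hypothetical values): https://array1.example.com:443?cpg=cpg0&snapcpg=snapcpg0&hostset=cluster1hosts&disabletlsvalidation=true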
*/
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
// https://hostname:443/cpgname/hostsetname. The hostset should map to the cluster or zone (all nodes in the cluster or zone MUST be in the hostset and be configured outside CloudStack for now)
String url = (String) dsInfos.get("url");
Long zoneId = (Long) dsInfos.get("zoneId");
Long podId = (Long)dsInfos.get("podId");
Long clusterId = (Long)dsInfos.get("clusterId");
String dsName = (String) dsInfos.get("name");
String providerName = (String) dsInfos.get("providerName");
Long capacityBytes = (Long) dsInfos.get("capacityBytes");
Long capacityIops = (Long)dsInfos.get("capacityIops");
String tags = (String)dsInfos.get("tags");
@SuppressWarnings("unchecked")
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
// validate inputs are valid/provided as required
if (zoneId == null) throw new CloudRuntimeException("Zone Id must be specified.");
URL uri = null;
try {
uri = new URL(url);
} catch (Exception ignored) {
throw new CloudRuntimeException(url + " is not a valid URL");
}
String username = null;
String password = null;
String token = null;
String userInfo = uri.getUserInfo();
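// credentials may be embedded in the URL userinfo or supplied via the details map (username/password or API token)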
if (userInfo == null || userInfo.split(":").length < 2) {
// check if it was passed in the details object
username = details.get(ProviderAdapter.API_USERNAME_KEY);
if (username != null) {
password = details.get(ProviderAdapter.API_PASSWORD_KEY);
userInfo = username + ":" + password;
} else {
token = details.get(ProviderAdapter.API_TOKEN_KEY);
}
} else {
try {
userInfo = java.net.URLDecoder.decode(userInfo, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
throw new CloudRuntimeException("Unexpected error parsing the provided user info; check that it does not include any invalid characters");
}
username = userInfo.split(":")[0];
password = userInfo.split(":")[1];
}
s_logger.info("Registering block storage provider with user=" + username);
if (clusterId != null) {
Hypervisor.HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
if (!hypervisorType.equals(HypervisorType.KVM)) {
throw new CloudRuntimeException("Unsupported hypervisor type for provided cluster: " + hypervisorType.toString());
}
// Primary datastore is cluster-wide, check and set the podId and clusterId parameters
if (podId == null) {
throw new CloudRuntimeException("Pod Id must also be specified when the Cluster Id is specified for Cluster-wide primary storage.");
}
s_logger.info("Registering with clusterid=" + clusterId + " which is confirmed to be a KVM host");
} else if (podId != null) {
throw new CloudRuntimeException("Cluster Id must also be specified when the Pod Id is specified for Cluster-wide primary storage.");
}
// validate we don't have any duplication going on
List<StoragePoolVO> storagePoolVO = _primaryDataStoreDao.findPoolsByProvider(providerName);
if (CollectionUtils.isNotEmpty(storagePoolVO)) {
for (StoragePoolVO poolVO : storagePoolVO) {
Map <String, String> poolDetails = _primaryDataStoreDao.getDetails(poolVO.getId());
String otherPoolUrl = poolDetails.get(ProviderAdapter.API_URL_KEY);
if (dsName.equals(poolVO.getName())) {
throw new InvalidParameterValueException("A pool with the name [" + dsName + "] already exists, choose another name");
}
if (uri.toString().equals(otherPoolUrl)) {
throw new IllegalArgumentException("Provider URL [" + otherPoolUrl + "] is already in use by another storage pool named [" + poolVO.getName() + "], please validate you have correct API and CPG");
}
}
}
s_logger.info("Validated no other pool exists with this name: " + dsName);
try {
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
parameters.setHost(uri.getHost());
parameters.setPort(uri.getPort());
parameters.setPath(uri.getPath() + "?" + uri.getQuery());
parameters.setType(StoragePoolType.FiberChannel);
parameters.setZoneId(zoneId);
parameters.setPodId(podId);
parameters.setClusterId(clusterId);
parameters.setName(dsName);
parameters.setProviderName(providerName);
parameters.setManaged(true);
parameters.setCapacityBytes(capacityBytes);
parameters.setUsedBytes(0);
parameters.setCapacityIops(capacityIops);
parameters.setHypervisorType(HypervisorType.KVM);
parameters.setTags(tags);
parameters.setUserInfo(userInfo);
parameters.setUuid(UUID.randomUUID().toString());
details.put(ProviderAdapter.API_URL_KEY, uri.toString());
if (username != null) {
details.put(ProviderAdapter.API_USERNAME_KEY, username);
}
if (password != null) {
details.put(ProviderAdapter.API_PASSWORD_KEY, DBEncryptionUtil.encrypt(password));
}
if (token != null) {
details.put(ProviderAdapter.API_TOKEN_KEY, DBEncryptionUtil.encrypt(token));
}
// this appears to control placing the storage pool above network file system based storage pools in priority
details.put(Storage.Capability.HARDWARE_ACCELERATION.toString(), "true");
// this new capability indicates the storage pool allows volumes to migrate to/from other pools (i.e. to/from NFS pools)
details.put(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString(), "true");
parameters.setDetails(details);
// make sure the storage array is connectable and the pod and hostgroup objects exist
ProviderAdapter api = _adapterFactoryMap.getAPI(parameters.getUuid(), providerName, details);
// validate the provided details are correct/valid for the provider
api.validate();
// if we have user-provided capacity bytes, validate they do not exceed the managed storage capacity bytes
ProviderVolumeStorageStats stats = api.getManagedStorageStats();
if (capacityBytes != null && capacityBytes != 0) {
if (stats.getCapacityInBytes() > 0) {
if (stats.getCapacityInBytes() < capacityBytes) {
throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", storage capacity from storage provider: " + stats.getCapacityInBytes());
}
}
parameters.setCapacityBytes(capacityBytes);
}
// if we have no user-provided capacity bytes, use the ones provided by storage
else {
if (stats.getCapacityInBytes() <= 0) {
throw new InvalidParameterValueException("Capacity bytes note available from the storage provider, user provided capacity bytes must be specified");
}
parameters.setCapacityBytes(stats.getCapacityInBytes());
}
s_logger.info("Persisting [" + dsName + "] storage pool metadata to database");
return _dataStoreHelper.createPrimaryDataStore(parameters);
} catch (Throwable e) {
s_logger.error("Problem persisting storage pool", e);
throw new CloudRuntimeException(e);
}
}
/**
* Get the type of Hypervisor from the cluster id
* @param clusterId
* @return
*/
private Hypervisor.HypervisorType getHypervisorTypeForCluster(long clusterId) {
ClusterVO cluster = _clusterDao.findById(clusterId);
if (cluster == null) {
throw new CloudRuntimeException("Unable to locate the specified cluster: " + clusterId);
}
return cluster.getHypervisorType();
}
/**
* Attach the pool to a cluster (all hosts in a single cluster)
*/
@Override
public boolean attachCluster(DataStore store, ClusterScope scope) {
s_logger.info("Attaching storage pool [" + store.getName() + "] to cluster [" + scope.getScopeId() + "]");
_dataStoreHelper.attachCluster(store);
StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId());
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
// Check if there is host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
if (allHosts.isEmpty()) {
_primaryDataStoreDao.expunge(primarystore.getId());
throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
}
if (dataStoreVO.isManaged()) {
for (HostVO h : allHosts) {
s_logger.debug("adding host " + h.getName() + " to storage pool " + store.getName());
}
}
s_logger.debug("In createPool Adding the pool to each of the hosts");
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO h : allHosts) {
try {
_storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId());
poolHosts.add(h);
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e);
}
}
if (poolHosts.isEmpty()) {
s_logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId());
_primaryDataStoreDao.expunge(primarystore.getId());
throw new CloudRuntimeException("Failed to access storage pool");
}
return true;
}
@Override
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
s_logger.info("Attaching storage pool [" + store.getName() + "] to host [" + scope.getScopeId() + "]");
_dataStoreHelper.attachHost(store, scope, existingInfo);
return true;
}
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
s_logger.info("Attaching storage pool [" + dataStore.getName() + "] to zone [" + scope.getScopeId() + "]");
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO host : hosts) {
try {
_storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
poolHosts.add(host);
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
}
}
if (poolHosts.isEmpty()) {
s_logger.warn("No host can access storage pool " + dataStore + " in this zone.");
_primaryDataStoreDao.expunge(dataStore.getId());
throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts.");
}
_dataStoreHelper.attachZone(dataStore, hypervisorType);
return true;
}
/**
* Put the storage pool in maintenance mode
*/
@Override
public boolean maintain(DataStore store) {
s_logger.info("Placing storage pool [" + store.getName() + "] in maintainence mode");
if (_storagePoolAutomation.maintain(store)) {
return _dataStoreHelper.maintain(store);
} else {
return false;
}
}
/**
* Cancel maintenance mode
*/
@Override
public boolean cancelMaintain(DataStore store) {
s_logger.info("Canceling storage pool maintainence for [" + store.getName() + "]");
if (_dataStoreHelper.cancelMaintain(store)) {
return _storagePoolAutomation.cancelMaintain(store);
} else {
return false;
}
}
/**
* Delete the data store
*/
@Override
public boolean deleteDataStore(DataStore store) {
s_logger.info("Delete datastore called for [" + store.getName() + "]");
return _dataStoreHelper.deletePrimaryDataStore(store);
}
/**
* Migrate objects in this store to another store
*/
@Override
public boolean migrateToObjectStore(DataStore store) {
s_logger.info("Migrate datastore called for [" + store.getName() + "]. This is not currently implemented for this provider at this time");
return false;
}
/**
* Update the storage pool configuration
*/
@Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
_adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details);
}
/**
* Enable the storage pool (allows volumes from this pool)
*/
@Override
public void enableStoragePool(DataStore store) {
s_logger.info("Enabling storage pool [" + store.getName() + "]");
_dataStoreHelper.enable(store);
}
/**
* Disable storage pool (stops new volume provisioning from pool)
*/
@Override
public void disableStoragePool(DataStore store) {
s_logger.info("Disabling storage pool [" + store.getName() + "]");
_dataStoreHelper.disable(store);
}
}

View File

@ -0,0 +1,134 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import java.util.HashMap;
import java.util.Map;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
import org.apache.log4j.Logger;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
public class AdaptivePrimaryDatastoreAdapterFactoryMap {
private final Logger logger = Logger.getLogger(AdaptivePrimaryDatastoreAdapterFactoryMap.class);
private Map<String,ProviderAdapterFactory> factoryMap = new HashMap<String,ProviderAdapterFactory>();
private Map<String,ProviderAdapter> apiMap = new HashMap<String,ProviderAdapter>();
public AdaptivePrimaryDatastoreAdapterFactoryMap() {
}
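// Typical usage (illustrative; the pool UUID and details map are hypothetical):
//   AdaptivePrimaryDatastoreAdapterFactoryMap map = new AdaptivePrimaryDatastoreAdapterFactoryMap();
//   map.register(new FlashArrayAdapterFactory());
//   ProviderAdapter api = map.getAPI(poolUuid, "Flash Array", details);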
/**
* Given a storage pool UUID, return the cached client, creating and caching
* it on first use.
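* Uses double-checked locking so concurrent callers share a single adapter per pool.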
*/
public final ProviderAdapter getAPI(String uuid, String providerName, Map<String, String> details) {
ProviderAdapter api = apiMap.get(uuid);
if (api == null) {
synchronized (this) {
api = apiMap.get(uuid);
if (api == null) {
api = createNewAdapter(uuid, providerName, details);
apiMap.put(uuid, api);
logger.debug("Cached the new ProviderAdapter for storage pool " + uuid);
}
}
}
return api;
}
/**
* Update the API for the given UUID; allows for URL changes and authentication updates.
* @param uuid
* @param providerName
* @param details
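* Note: the previously cached adapter, if any, is disconnected on a best-effort basis.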
*/
public final void updateAPI(String uuid, String providerName, Map<String, String> details) {
// attempt to create (which validates) the new info before updating the cache
ProviderAdapter adapter = createNewAdapter(uuid, providerName, details);
// if it's null, it's likely because no action has occurred yet to trigger the API object to be loaded
if (adapter == null) {
throw new CloudRuntimeException("Adapter configruation failed for an unknown reason");
}
ProviderAdapter oldAdapter = apiMap.get(uuid);
apiMap.put(uuid, adapter);
try {
if (oldAdapter != null) oldAdapter.disconnect();
} catch (Throwable e) {
logger.debug("Failure closing the old ProviderAdapter during an update of the cached data after validation of the new adapter configuration, likely the configuration is no longer valid", e);
}
}
public void register(ProviderAdapterFactory factory) {
factoryMap.put(factory.getProviderName(), factory);
}
protected ProviderAdapter createNewAdapter(String uuid, String providerName, Map<String, String> details) {
String authnType = details.get(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY);
if (authnType == null) authnType = "basicauth";
String lookupKey = null;
if (authnType.equals("basicauth")) {
lookupKey = details.get(ProviderAdapter.API_USERNAME_KEY);
if (lookupKey == null) {
throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_USERNAME_KEY + "] is required when using authentication type [" + authnType + "]");
}
} else if (authnType.equals("apitoken")) {
lookupKey = details.get(ProviderAdapter.API_TOKEN_KEY);
if (lookupKey == null) {
throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_TOKEN_KEY + "] is required when using authentication type [" + authnType + "]");
}
} else {
throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_AUTHENTICATION_TYPE_KEY + "] not set to valid value");
}
String url = details.get(ProviderAdapter.API_URL_KEY);
if (url == null) {
throw new RuntimeException("URL required when configuring a Managed Block API storage provider");
}
logger.debug("Looking for Provider [" + providerName + "] at [" + url + "]");
ProviderAdapterFactory factory = factoryMap.get(providerName);
if (factory == null) {
throw new RuntimeException("Unable to find a storage provider API factory for provider: " + providerName);
}
// decrypt password or token before sending to provider
if (authnType.equals("basicauth")) {
try {
details.put(ProviderAdapter.API_PASSWORD_KEY, DBEncryptionUtil.decrypt(details.get(ProviderAdapter.API_PASSWORD_KEY)));
} catch (Exception e) {
logger.warn("Failed to decrypt managed block API property: [" + ProviderAdapter.API_PASSWORD_KEY + "], trying to use as-is");
}
} else if (authnType.equals("apitoken")) {
try {
details.put(ProviderAdapter.API_TOKEN_KEY, DBEncryptionUtil.decrypt(details.get(ProviderAdapter.API_TOKEN_KEY)));
} catch (Exception e) {
logger.warn("Failed to decrypt managed block API property: [" + ProviderAdapter.API_TOKEN_KEY + "], trying to use as-is");
}
}
ProviderAdapter api = factory.create(url, details);
api.validate();
logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url);
return api;
}
}

View File

@ -0,0 +1,86 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.ComponentContext;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
import org.apache.cloudstack.storage.datastore.driver.AdaptiveDataStoreDriverImpl;
import org.apache.cloudstack.storage.datastore.lifecycle.AdaptiveDataStoreLifeCycleImpl;
@Component
public abstract class AdaptivePrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider {
static final Logger s_logger = Logger.getLogger(AdaptivePrimaryDatastoreProviderImpl.class);
AdaptiveDataStoreDriverImpl driver;
HypervisorHostListener listener;
AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap = new AdaptivePrimaryDatastoreAdapterFactoryMap();
DataStoreLifeCycle lifecycle;
AdaptivePrimaryDatastoreProviderImpl(ProviderAdapterFactory f) {
s_logger.info("Creating " + f.getProviderName());
factoryMap.register(f);
}
@Override
public DataStoreLifeCycle getDataStoreLifeCycle() {
return this.lifecycle;
}
@Override
public boolean configure(Map<String, Object> params) {
s_logger.info("Configuring " + getName());
driver = new AdaptiveDataStoreDriverImpl(factoryMap);
driver.setProviderName(getName());
lifecycle = ComponentContext.inject(new AdaptiveDataStoreLifeCycleImpl(factoryMap));
driver = ComponentContext.inject(driver);
listener = ComponentContext.inject(new AdaptivePrimaryHostListener(factoryMap));
return true;
}
@Override
public PrimaryDataStoreDriver getDataStoreDriver() {
return this.driver;
}
@Override
public HypervisorHostListener getHostListener() {
return this.listener;
}
@Override
public Set<DataStoreProviderType> getTypes() {
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
types.add(DataStoreProviderType.PRIMARY);
return types;
}
}

View File

@ -0,0 +1,83 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.log4j.Logger;
import com.cloud.exception.StorageConflictException;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
public class AdaptivePrimaryHostListener implements HypervisorHostListener {
static final Logger s_logger = Logger.getLogger(AdaptivePrimaryHostListener.class);
@Inject
StoragePoolHostDao storagePoolHostDao;
public AdaptivePrimaryHostListener(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
}
@Override
public boolean hostAboutToBeRemoved(long hostId) {
s_logger.debug("hostAboutToBeRemoved called");
return true;
}
@Override
public boolean hostAdded(long hostId) {
s_logger.debug("hostAdded called");
return true;
}
@Override
public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
s_logger.debug("hostConnect called for hostid [" + hostId + "], poolId [" + poolId + "]");
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
if (storagePoolHost == null) {
storagePoolHost = new StoragePoolHostVO(poolId, hostId, "");
storagePoolHostDao.persist(storagePoolHost);
}
return true;
}
@Override
public boolean hostDisconnected(long hostId, long poolId) {
s_logger.debug("hostDisconnected called for hostid [" + hostId + "], poolId [" + poolId + "]");
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
if (storagePoolHost != null) {
storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
}
return true;
}
@Override
public boolean hostEnabled(long hostId) {
s_logger.debug("hostEnabled called");
return true;
}
@Override
public boolean hostRemoved(long hostId, long clusterId) {
s_logger.debug("hostRemoved called");
return true;
}
}

View File

@ -0,0 +1,52 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-volume-flasharray</artifactId>
<name>Apache CloudStack Plugin - Storage Volume - Pure Flash Array</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.19.0.0-SNAPSHOT</version>
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
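<!-- bind the surefire test goal to the integration-test phase for this plugin -->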
<executions>
<execution>
<phase>integration-test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import java.util.Map;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
public class FlashArrayAdapterFactory implements ProviderAdapterFactory {
@Override
public String getProviderName() {
return "Flash Array";
}
@Override
public ProviderAdapter create(String url, Map<String, String> details) {
return new FlashArrayAdapter(url, details);
}
}

View File

@ -0,0 +1,34 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayApiToken {
@JsonProperty("api_token")
private String apiToken;
public void setApiToken(String apiToken) {
this.apiToken = apiToken;
}
public String getApiToken() {
return apiToken;
}
}

View File

@ -0,0 +1,68 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnection {
@JsonProperty("host_group")
private FlashArrayConnectionHostgroup hostGroup;
@JsonProperty("host")
private FlashArrayConnectionHost host;
@JsonProperty("volume")
private FlashArrayVolume volume;
@JsonProperty("lun")
private Integer lun;
public FlashArrayConnectionHostgroup getHostGroup() {
return hostGroup;
}
public void setHostGroup(FlashArrayConnectionHostgroup hostGroup) {
this.hostGroup = hostGroup;
}
public FlashArrayConnectionHost getHost() {
return host;
}
public void setHost(FlashArrayConnectionHost host) {
this.host = host;
}
public FlashArrayVolume getVolume() {
return volume;
}
public void setVolume(FlashArrayVolume volume) {
this.volume = volume;
}
public Integer getLun() {
return lun;
}
public void setLun(Integer lun) {
this.lun = lun;
}
}

View File

@ -0,0 +1,39 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnectionHost {
@JsonProperty("name")
private String name;
public FlashArrayConnectionHost() {}
public FlashArrayConnectionHost(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnectionHostgroup {
@JsonProperty("name")
private String name;
public FlashArrayConnectionHostgroup() {}
public FlashArrayConnectionHostgroup(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}

View File

@ -0,0 +1,72 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayGroupMemberReference {
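// (de)serializes as { "group": { "name": ... }, "member": { "name": ... } }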
@JsonProperty("group")
private FlashArrayGroupNameWrapper group;
@JsonProperty("member")
private FlashArrayGroupMemberNameWrapper member;
public static class FlashArrayGroupNameWrapper {
@JsonProperty("name")
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public static class FlashArrayGroupMemberNameWrapper {
@JsonProperty("name")
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public FlashArrayGroupNameWrapper getGroup() {
return group;
}
public void setGroup(FlashArrayGroupNameWrapper group) {
this.group = group;
}
public FlashArrayGroupMemberNameWrapper getMember() {
return member;
}
public void setMember(FlashArrayGroupMemberNameWrapper member) {
this.member = member;
}
}

View File

@ -0,0 +1,38 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import java.util.ArrayList;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayGroupMemberReferenceList {
@JsonProperty("items")
private ArrayList<FlashArrayGroupMemberReference> items;
public ArrayList<FlashArrayGroupMemberReference> getItems() {
return items;
}
public void setItems(ArrayList<FlashArrayGroupMemberReference> items) {
this.items = items;
}
}

Some files were not shown because too many files have changed in this diff