From 7ea068c4dcfa0c33ec5f258a2a89db52bef09b71 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Thu, 7 Dec 2023 09:10:11 +0100 Subject: [PATCH 01/22] kvm: fix error 'Failed to find passphrase for keystore: cloud.jks' when enable SSL for kvm agent (#7923) --- .../java/com/cloud/agent/properties/AgentProperties.java | 9 +++++++++ .../kvm/resource/LibvirtComputingResource.java | 6 +++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 5c7f4ed4b23..84a66d70761 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -14,6 +14,8 @@ */ package com.cloud.agent.properties; +import org.apache.cloudstack.utils.security.KeyStoreUtils; + /** * Class of constant agent's properties available to configure on * "agent.properties". @@ -728,6 +730,13 @@ public class AgentProperties{ */ public static final Property CONTROL_CIDR = new Property<>("control.cidr", "169.254.0.0/16"); + /** + * Keystore passphrase + * Data type: String.
+ * Default value: null + */ + public static final Property KEYSTORE_PASSPHRASE = new Property<>(KeyStoreUtils.KS_PASSPHRASE_PROPERTY, null, String.class); + public static class Property { private String name; private T defaultValue; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index a34edba7fd0..b7611cd07bb 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -1012,7 +1012,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - enableSSLForKvmAgent(params); + enableSSLForKvmAgent(); configureLocalStorage(); /* Directory to use for Qemu sockets like for the Qemu Guest Agent */ @@ -1319,13 +1319,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - private void enableSSLForKvmAgent(final Map params) { + private void enableSSLForKvmAgent() { final File keyStoreFile = PropertiesUtil.findConfigFile(KeyStoreUtils.KS_FILENAME); if (keyStoreFile == null) { s_logger.info("Failed to find keystore file: " + KeyStoreUtils.KS_FILENAME); return; } - String keystorePass = (String)params.get(KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + String keystorePass = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KEYSTORE_PASSPHRASE); if (StringUtils.isBlank(keystorePass)) { s_logger.info("Failed to find passphrase for keystore: " + KeyStoreUtils.KS_FILENAME); return; From 9773ba3e951e7d75284f027dc2a009cd04bc3197 Mon Sep 17 00:00:00 2001 From: Rene Glover Date: Thu, 7 Dec 2023 23:51:25 -0600 Subject: [PATCH 02/22] Allow autoscale group and name override (#8324) Updates AutoScaleManager/AutoScaleManagerImpl so that getNextVmHostName and checkAutoScaleVmGroupName can be overridden in derivative implementations to allow for custom naming conditions and restrictions. If possible, would like to include this in 4.19 since it is a trivial change. 
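For illustration, a minimal sketch of such a derivative implementation; the subclass name and the specific naming and validation rules below are hypothetical, not part of this change:

    import com.cloud.exception.InvalidParameterValueException;
    import com.cloud.network.as.AutoScaleVmGroupVO;

    public class CustomAutoScaleManagerImpl extends AutoScaleManagerImpl {
        @Override
        public String getNextVmHostName(AutoScaleVmGroupVO asGroup) {
            // Site-specific naming scheme instead of the default random suffix.
            return "asg-" + asGroup.getId() + "-" + asGroup.getNextVmSeq();
        }

        @Override
        public void checkAutoScaleVmGroupName(String groupName) {
            // Stricter restriction than the default 1-255 character rule.
            if (groupName == null || groupName.isEmpty() || groupName.length() > 63) {
                throw new InvalidParameterValueException("AutoScale VM group name must be between 1 and 63 characters long");
            }
        }
    }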
This can be used to create an extension of AutoScaleManagerImpl.java, overriding these 2 methods --- .../main/java/com/cloud/network/as/AutoScaleManager.java | 4 ++++ .../java/com/cloud/network/as/AutoScaleManagerImpl.java | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManager.java b/server/src/main/java/com/cloud/network/as/AutoScaleManager.java index 1d829b1fcc5..cf6aab6a7bb 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManager.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManager.java @@ -55,4 +55,8 @@ public interface AutoScaleManager extends AutoScaleService { void checkIfVmActionAllowed(Long vmId); void removeVmFromVmGroup(Long vmId); + + String getNextVmHostName(AutoScaleVmGroupVO asGroup); + + void checkAutoScaleVmGroupName(String groupName); } diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index 2f69ac6e9ba..c10ff89fa3d 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -1938,7 +1938,8 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } } - private String getNextVmHostName(AutoScaleVmGroupVO asGroup) { + @Override + public String getNextVmHostName(AutoScaleVmGroupVO asGroup) { String vmHostNameSuffix = "-" + asGroup.getNextVmSeq() + "-" + RandomStringUtils.random(VM_HOSTNAME_RANDOM_SUFFIX_LENGTH, 0, 0, true, false, (char[])null, new SecureRandom()).toLowerCase(); // Truncate vm group name because max length of vm name is 63 @@ -1946,7 +1947,8 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage return VM_HOSTNAME_PREFIX + asGroup.getName().substring(0, subStringLength) + vmHostNameSuffix; } - private void checkAutoScaleVmGroupName(String groupName) { + @Override + public void checkAutoScaleVmGroupName(String groupName) { String errorMessage = ""; if (groupName == null || groupName.length() > 255 || groupName.length() < 1) { errorMessage = "AutoScale Vm Group name must be between 1 and 255 characters long"; From 7eb36367c905848d290c9a1871df8bfe400628fe Mon Sep 17 00:00:00 2001 From: Harikrishna Date: Fri, 8 Dec 2023 13:21:16 +0530 Subject: [PATCH 03/22] Add lock mechanism considering template id, pool id, host id in PowerFlex Storage (#8233) Observed a failure to start new virtual machine with PowerFlex storage. Traced it to concurrent VM starts using the same template and the same host to copy. Second mapping attempt failed. While creating the volume clone from the seeded template in primary storage, adding a lock with the string containing IDs of template, storage pool and destination host avoids the situation of concurrent mapping attempts with the same host. 
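In outline, the serialization works as follows; this is a condensed sketch of the locking pattern the patch introduces below, with illustrative local variable names:

    // Build a lock key unique per (template, pool, destination host) triple, so only
    // concurrent copies of the *same* mapping serialize on each other.
    String lockKey = "tmplId:" + templateId + "managedPoolId:" + poolId + "destinationHostId:" + hostId;
    GlobalLock lock = GlobalLock.getInternLock(lockKey);
    try {
        if (lock == null || !lock.lock(storagePoolMaxWaitSeconds)) {
            throw new CloudRuntimeException("Unable to create volume from template, couldn't lock on " + lockKey);
        }
        // Critical section: copy the seeded template into the new volume on this host.
        createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future);
    } finally {
        if (lock != null) {
            lock.unlock();
            lock.releaseRef();
        }
    }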
--- .../storage/volume/VolumeServiceImpl.java | 30 +++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index ffc12b98c84..47577cc52b2 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1478,8 +1478,8 @@ public class VolumeServiceImpl implements VolumeService { createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); } else { // We have a template on PowerFlex primary storage. Create new volume and copy to it. - s_logger.debug("Copying the template to the volume on primary storage"); - createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future); + createManagedVolumeCopyManagedTemplateAsyncWithLock(volumeInfo, destPrimaryDataStore, templateOnPrimary, + destHost, future, destDataStoreId, srcTemplateInfo.getId()); } } else { s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally"); @@ -1490,6 +1490,32 @@ public class VolumeServiceImpl implements VolumeService { return future; } + private void createManagedVolumeCopyManagedTemplateAsyncWithLock(VolumeInfo volumeInfo, PrimaryDataStore destPrimaryDataStore, TemplateInfo templateOnPrimary, + Host destHost, AsyncCallFuture future, long destDataStoreId, long srcTemplateId) { + GlobalLock lock = null; + try { + String tmplIdManagedPoolIdDestinationHostLockString = "tmplId:" + srcTemplateId + "managedPoolId:" + destDataStoreId + "destinationHostId:" + destHost.getId(); + lock = GlobalLock.getInternLock(tmplIdManagedPoolIdDestinationHostLockString); + if (lock == null) { + throw new CloudRuntimeException("Unable to create volume from template, couldn't get global lock on " + tmplIdManagedPoolIdDestinationHostLockString); + } + + int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); + if (!lock.lock(storagePoolMaxWaitSeconds)) { + s_logger.debug("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString); + throw new CloudRuntimeException("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString); + } + + s_logger.debug("Copying the template to the volume on primary storage"); + createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future); + } finally { + if (lock != null) { + lock.unlock(); + lock.releaseRef(); + } + } + } + private boolean computeSupportsVolumeClone(long zoneId, HypervisorType hypervisorType) { if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) { return true; From bba554bcc4735009e7552e4116bcad8b1cda7b14 Mon Sep 17 00:00:00 2001 From: Peinthor Rene Date: Fri, 8 Dec 2023 12:32:18 +0100 Subject: [PATCH 04/22] linstor: Fix possible NPE if Linstor storage-pool data missing (#8319) If Linstor doesn't return storage pool info, certain values are null. Now we assume the values are 0 if we get null values. 
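The underlying failure mode: mapToLong(StoragePool::getFreeCapacity) auto-unboxes the returned Long, so a missing value from Linstor surfaces as a NullPointerException. A null-safe sketch of the summation, matching the fix applied in the diff below:

    // Old form NPEs when Linstor omits pool data, because a null Long is auto-unboxed:
    //   .mapToLong(StoragePool::getFreeCapacity)
    // Null-safe form treats missing values as 0 (Linstor reports KiB, hence * 1024):
    long freeBytes = storagePools.stream()
            .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
            .mapToLong(sp -> sp.getFreeCapacity() != null ? sp.getFreeCapacity() : 0L)
            .sum() * 1024;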
--- .../kvm/storage/LinstorStorageAdaptor.java | 40 +++---------------- .../storage/datastore/util/LinstorUtil.java | 3 +- 2 files changed, 8 insertions(+), 35 deletions(-) diff --git a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java index 2847484e30e..3a703cdb426 100644 --- a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java +++ b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java @@ -28,6 +28,7 @@ import java.util.StringJoiner; import javax.annotation.Nonnull; +import org.apache.cloudstack.storage.datastore.util.LinstorUtil; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; @@ -489,39 +490,8 @@ public class LinstorStorageAdaptor implements StorageAdaptor { } public long getCapacity(LinstorStoragePool pool) { - DevelopersApi linstorApi = getLinstorAPI(pool); final String rscGroupName = pool.getResourceGroup(); - try { - List rscGrps = linstorApi.resourceGroupList( - Collections.singletonList(rscGroupName), - null, - null, - null); - - if (rscGrps.isEmpty()) { - final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName); - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); - } - - List storagePools = linstorApi.viewStoragePools( - Collections.emptyList(), - rscGrps.get(0).getSelectFilter().getStoragePoolList(), - null, - null, - null - ); - - final long capacity = storagePools.stream() - .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS) - .mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0) - .sum() * 1024; // linstor uses kiB - s_logger.debug("Linstor: GetCapacity() -> " + capacity); - return capacity; - } catch (ApiException apiEx) { - s_logger.error(apiEx.getMessage()); - throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); - } + return LinstorUtil.getCapacityBytes(pool.getSourceHost(), rscGroupName); } public long getAvailable(LinstorStoragePool pool) { @@ -550,7 +520,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor { final long free = storagePools.stream() .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS) - .mapToLong(StoragePool::getFreeCapacity).sum() * 1024; // linstor uses KiB + .mapToLong(sp -> sp.getFreeCapacity() != null ? sp.getFreeCapacity() : 0L).sum() * 1024; // linstor uses KiB s_logger.debug("Linstor: getAvailable() -> " + free); return free; @@ -586,7 +556,9 @@ public class LinstorStorageAdaptor implements StorageAdaptor { final long used = storagePools.stream() .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS) - .mapToLong(sp -> sp.getTotalCapacity() - sp.getFreeCapacity()).sum() * 1024; // linstor uses Kib + .mapToLong(sp -> sp.getTotalCapacity() != null && sp.getFreeCapacity() != null ? 
+ sp.getTotalCapacity() - sp.getFreeCapacity() : 0L) + .sum() * 1024; // linstor uses Kib s_logger.debug("Linstor: getUsed() -> " + used); return used; } catch (ApiException apiEx) { diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java index f1760a003ab..ddd15a5984a 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java @@ -72,7 +72,8 @@ public class LinstorUtil { return storagePools.stream() .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS) - .mapToLong(StoragePool::getTotalCapacity).sum() * 1024; // linstor uses kiB + .mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0L) + .sum() * 1024; // linstor uses kiB } catch (ApiException apiEx) { s_logger.error(apiEx.getMessage()); throw new CloudRuntimeException(apiEx); From f42feb1568d38375aa79da074cb07fb171208b7b Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Fri, 8 Dec 2023 12:35:09 +0100 Subject: [PATCH 05/22] CKS: update imagePullPolicy to IfNotPresent in yaml files (#8296) The kubernetes dashboard yaml file has the following setting image: kubernetesui/dashboard:v2.7.0 imagePullPolicy: Always see https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml The similar config can be found at https://raw.githubusercontent.com/weaveworks/weave/master/prog/weave-kube/weave-daemonset-k8s-1.11.yaml Due to it, CKS does not work in the following cases reach the dockerhub rate limitations (see https://docs.docker.com/docker-hub/download-rate-limit/) The VMs do not have internet connection Since the CKS ISO contains the image, it is not necessary to pull the images again. --- scripts/util/create-kubernetes-binaries-iso.sh | 3 +++ tools/marvin/marvin/config/test_data.py | 18 +++++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/scripts/util/create-kubernetes-binaries-iso.sh b/scripts/util/create-kubernetes-binaries-iso.sh index e7981d6ac0b..d5fb014f220 100755 --- a/scripts/util/create-kubernetes-binaries-iso.sh +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -145,6 +145,9 @@ if [ -z "${kubeadm_file_permissions}" ]; then fi chmod ${kubeadm_file_permissions} "${working_dir}/k8s/kubeadm" +echo "Updating imagePullPolicy to IfNotPresent in yaml files..." 
+sed -i "s/imagePullPolicy:.*/imagePullPolicy: IfNotPresent/g" ${working_dir}/*.yaml + mkisofs -o "${output_dir}/${build_name}" -J -R -l "${iso_dir}" rm -rf "${iso_dir}" diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index c4ac6008b1f..ef9bfd774f7 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2260,11 +2260,23 @@ test_data = { "url": "http://download.cloudstack.org/cks/setup-1.26.0.iso", "mincpunumber": 2, "minmemory": 2048 + }, + "1.27.8": { + "semanticversion": "1.27.8", + "url": "http://download.cloudstack.org/cks/setup-1.27.8.iso", + "mincpunumber": 2, + "minmemory": 2048 + }, + "1.28.4": { + "semanticversion": "1.28.4", + "url": "http://download.cloudstack.org/cks/setup-1.28.4.iso", + "mincpunumber": 2, + "minmemory": 2048 } }, - "cks_kubernetes_version": "1.26.0", - "cks_kubernetes_version_upgrade_from": "1.25.0", - "cks_kubernetes_version_upgrade_to": "1.26.0", + "cks_kubernetes_version": "1.28.4", + "cks_kubernetes_version_upgrade_from": "1.27.8", + "cks_kubernetes_version_upgrade_to": "1.28.4", "cks_service_offering": { "name": "CKS-Instance", "displaytext": "CKS Instance", From 2993c993632454ffe8be39607b94181834942f39 Mon Sep 17 00:00:00 2001 From: Sina Kashipazha Date: Fri, 8 Dec 2023 15:21:06 +0100 Subject: [PATCH 06/22] Add missing hosts info to the prometheus exporter output. (#8328) Sometimes the hostStats object of the agents becomes null in the management server. It is a rare situation, and we haven't found the root cause yet, but it occurs occasionally in our CloudStack deployments with many hosts. The hostStat is null, even though the agent is UP and hosting multiple VMs. It is possible to access the VM consoles and execute tasks on them. This pull request doesn't address the issue directly; rather it displays those hosts in Prometheus so we can restart the agent and get the necessary information. 
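Given the MissingHostInfo metric added below (metric name cloudstack_host_missing_info, rendered by its toMetricsString() format string), the exporter output for an affected host would take the following shape; the zone and host label values here are illustrative:

    cloudstack_host_missing_info{zone="zone1",hostname="kvm-host-01",filter="hostStats"} -1
    cloudstack_host_missing_info{zone="zone1",hostname="kvm-host-01",filter="cpuCapacity"} -1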
--- .../metrics/PrometheusExporterImpl.java | 85 ++++++++++++++----- 1 file changed, 62 insertions(+), 23 deletions(-) diff --git a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java index 3b111da5961..17fbd48181a 100644 --- a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java +++ b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java @@ -82,6 +82,24 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp private static final String ONLINE = "online"; private static final String OFFLINE = "offline"; + enum MissingInfoFilter { + Host_Stats("hostStats"), + CPU_CAPACITY("cpuCapacity"), + MEM_CAPACITY("memCapacity"), + CORE_CAPACITY("coreCapacity"); + + private final String name; + + MissingInfoFilter(String name){ + this.name = name; + } + + @Override + public String toString() { + return name; + } + } + private static List metricsItems = new ArrayList<>(); @Inject @@ -129,8 +147,6 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp Map upHosts = new HashMap<>(); Map downHosts = new HashMap<>(); - HostStats hostStats; - for (final HostVO host : hostDao.listAll()) { if (host == null || host.getType() != Host.Type.Routing || host.getDataCenterId() != dcId) { continue; @@ -147,8 +163,6 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp int isDedicated = (dr != null) ? 1 : 0; metricsList.add(new ItemHostIsDedicated(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), isDedicated)); - String hostTags = markTagMaps(host, totalHosts, upHosts, downHosts); - hostStats = ApiDBUtils.getHostStatistics(host.getId()); // Get account, domain details for dedicated hosts if (isDedicated == 1) { @@ -160,16 +174,22 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp metricsList.add(new ItemHostDedicatedToAccount(zoneName, host.getName(), accountName, domain.getPath(), isDedicated)); } + String hostTags = markTagMaps(host, totalHosts, upHosts, downHosts); + HostStats hostStats = ApiDBUtils.getHostStatistics(host.getId()); + + if (hostStats == null){ + metricsList.add(new MissingHostInfo(zoneName, host.getName(), MissingInfoFilter.Host_Stats)); + } + final String cpuFactor = String.valueOf(CapacityManager.CpuOverprovisioningFactor.valueIn(host.getClusterId())); final CapacityVO cpuCapacity = capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU); - final double cpuUsedMhz = hostStats.getCpuUtilization() * host.getCpus() * host.getSpeed() / 100.0 ; - if (host.isInMaintenanceStates()) { - metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, ALLOCATED, 0L, isDedicated, hostTags)); - metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, USED, 0L, isDedicated, hostTags)); - metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, TOTAL, 0L, isDedicated, hostTags)); + if (cpuCapacity == null && !host.isInMaintenanceStates()){ + metricsList.add(new MissingHostInfo(zoneName, host.getName(), MissingInfoFilter.CPU_CAPACITY)); } - else if (cpuCapacity != null && cpuCapacity.getCapacityState() == 
CapacityState.Enabled) { + + if (hostStats != null && cpuCapacity != null && cpuCapacity.getCapacityState() == CapacityState.Enabled) { + final double cpuUsedMhz = hostStats.getCpuUtilization() * host.getCpus() * host.getSpeed() / 100.0 ; metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, ALLOCATED, cpuCapacity.getUsedCapacity(), isDedicated, hostTags)); metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, USED, cpuUsedMhz, isDedicated, hostTags)); metricsList.add(new ItemHostCpu(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), cpuFactor, TOTAL, cpuCapacity.getTotalCapacity(), isDedicated, hostTags)); @@ -181,12 +201,12 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp final String memoryFactor = String.valueOf(CapacityManager.MemOverprovisioningFactor.valueIn(host.getClusterId())); final CapacityVO memCapacity = capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY); - if (host.isInMaintenanceStates()) { - metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, ALLOCATED, 0L, isDedicated, hostTags)); - metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, USED, 0, isDedicated, hostTags)); - metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, TOTAL, 0L, isDedicated, hostTags)); + + if (memCapacity == null && !host.isInMaintenanceStates()){ + metricsList.add(new MissingHostInfo(zoneName, host.getName(), MissingInfoFilter.MEM_CAPACITY)); } - else if (memCapacity != null && memCapacity.getCapacityState() == CapacityState.Enabled) { + + if (hostStats != null && memCapacity != null && memCapacity.getCapacityState() == CapacityState.Enabled) { metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, ALLOCATED, memCapacity.getUsedCapacity(), isDedicated, hostTags)); metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, USED, hostStats.getUsedMemory(), isDedicated, hostTags)); metricsList.add(new ItemHostMemory(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), memoryFactor, TOTAL, memCapacity.getTotalCapacity(), isDedicated, hostTags)); @@ -197,13 +217,13 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp } metricsList.add(new ItemHostVM(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), vmDao.listByHostId(host.getId()).size())); - final CapacityVO coreCapacity = capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU_CORE); - if (host.isInMaintenanceStates()) { - metricsList.add(new ItemVMCore(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), USED, 0L, isDedicated, hostTags)); - metricsList.add(new ItemVMCore(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), TOTAL, 0L, isDedicated, hostTags)); + + if (coreCapacity == null && !host.isInMaintenanceStates()){ + metricsList.add(new MissingHostInfo(zoneName, host.getName(), MissingInfoFilter.CORE_CAPACITY)); } - else if (coreCapacity != null && coreCapacity.getCapacityState() == CapacityState.Enabled) 
{ + + if (hostStats != null && coreCapacity != null && coreCapacity.getCapacityState() == CapacityState.Enabled) { metricsList.add(new ItemVMCore(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), USED, coreCapacity.getUsedCapacity(), isDedicated, hostTags)); metricsList.add(new ItemVMCore(zoneName, zoneUuid, host.getName(), host.getUuid(), host.getPrivateIpAddress(), TOTAL, coreCapacity.getTotalCapacity(), isDedicated, hostTags)); } else { @@ -213,17 +233,17 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp } final List cpuCapacity = capacityDao.findCapacityBy((int) Capacity.CAPACITY_TYPE_CPU, dcId, null, null); - if (cpuCapacity != null && cpuCapacity.size() > 0) { + if (cpuCapacity != null && !cpuCapacity.isEmpty()) { metricsList.add(new ItemHostCpu(zoneName, zoneUuid, null, null, null, null, ALLOCATED, cpuCapacity.get(0).getAllocatedCapacity() != null ? cpuCapacity.get(0).getAllocatedCapacity() : 0, 0, "")); } final List memCapacity = capacityDao.findCapacityBy((int) Capacity.CAPACITY_TYPE_MEMORY, dcId, null, null); - if (memCapacity != null && memCapacity.size() > 0) { + if (memCapacity != null && !memCapacity.isEmpty()) { metricsList.add(new ItemHostMemory(zoneName, zoneUuid, null, null, null, null, ALLOCATED, memCapacity.get(0).getAllocatedCapacity() != null ? memCapacity.get(0).getAllocatedCapacity() : 0, 0, "")); } final List coreCapacity = capacityDao.findCapacityBy((int) Capacity.CAPACITY_TYPE_CPU_CORE, dcId, null, null); - if (coreCapacity != null && coreCapacity.size() > 0) { + if (coreCapacity != null && !coreCapacity.isEmpty()) { metricsList.add(new ItemVMCore(zoneName, zoneUuid, null, null, null, ALLOCATED, coreCapacity.get(0).getAllocatedCapacity() != null ? coreCapacity.get(0).getAllocatedCapacity() : 0, 0, "")); } @@ -626,6 +646,25 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp } } + class MissingHostInfo extends Item { + + String zoneName; + String hostName; + MissingInfoFilter filter; + + public MissingHostInfo(String zoneName, String hostname, MissingInfoFilter filter) { + super("cloudstack_host_missing_info"); + this.zoneName = zoneName; + this.hostName = hostname; + this.filter = filter; + } + + @Override + public String toMetricsString() { + return String.format("%s{zone=\"%s\",hostname=\"%s\",filter=\"%s\"} -1", name, zoneName, hostName, filter); + } + } + class ItemHostCpu extends Item { String zoneName; String zoneUuid; From 4e46f5ad17fa16684b689c061c7cc52ad2e5b7cc Mon Sep 17 00:00:00 2001 From: Fabricio Duarte Date: Fri, 8 Dec 2023 11:30:19 -0300 Subject: [PATCH 07/22] Add logs to listLoadBalancerRuleInstances API (#8094) --- .../ListLoadBalancerRuleInstancesCmd.java | 68 ++++++++----------- .../lb/LoadBalancingRulesManagerImpl.java | 61 +++++++++++------ 2 files changed, 69 insertions(+), 60 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java index 77aaa6bc1d3..723e0efec12 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java @@ -97,52 +97,44 @@ public class ListLoadBalancerRuleInstancesCmd extends BaseListCmd implements Use public void execute() { Pair, List> vmServiceMap = 
_lbService.listLoadBalancerInstances(this); List result = vmServiceMap.first(); + s_logger.debug(String.format("A total of [%s] user VMs were obtained when listing the load balancer instances: [%s].", result.size(), result)); + List serviceStates = vmServiceMap.second(); + s_logger.debug(String.format("A total of [%s] service states were obtained when listing the load balancer instances: [%s].", serviceStates.size(), serviceStates)); if (!isListLbVmip()) { - // list lb instances - ListResponse response = new ListResponse(); - List vmResponses = new ArrayList(); - if (result != null) { - vmResponses = _responseGenerator.createUserVmResponse(ResponseView.Restricted, "loadbalancerruleinstance", result.toArray(new UserVm[result.size()])); + ListResponse response = new ListResponse<>(); + List vmResponses = _responseGenerator.createUserVmResponse(ResponseView.Restricted, "loadbalancerruleinstance", result.toArray(new UserVm[0])); - - for (int i = 0; i < result.size(); i++) { - vmResponses.get(i).setServiceState(serviceStates.get(i)); - } + for (int i = 0; i < result.size(); i++) { + vmResponses.get(i).setServiceState(serviceStates.get(i)); } + response.setResponses(vmResponses); response.setResponseName(getCommandName()); setResponseObject(response); - - - } else { - ListResponse lbRes = new ListResponse(); - - List vmResponses = new ArrayList(); - List listlbVmRes = new ArrayList(); - - if (result != null) { - vmResponses = _responseGenerator.createUserVmResponse(getResponseView(), "loadbalancerruleinstance", result.toArray(new UserVm[result.size()])); - - - List ipaddr = null; - - for (int i=0;i lbRes = new ListResponse<>(); + + List vmResponses = _responseGenerator.createUserVmResponse(getResponseView(), "loadbalancerruleinstance", result.toArray(new UserVm[0])); + List lbRuleVmMapList = new ArrayList<>(); + + for (int i=0; i extends ManagerBase implements Boolean applied = cmd.isApplied(); if (applied == null) { + s_logger.info(String.format("The [%s] parameter was not passed. 
Using the default value [%s].", ApiConstants.APPLIED, Boolean.TRUE)); applied = Boolean.TRUE; } LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); if (loadBalancer == null) { - return null; - } - - _accountMgr.checkAccess(caller, null, true, loadBalancer); - - List loadBalancerInstances = new ArrayList(); - List serviceStates = new ArrayList(); - List vmLoadBalancerMappings = null; - vmLoadBalancerMappings = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); - if(vmLoadBalancerMappings == null) { - String msg = "no VM Loadbalancer Mapping found"; + String msg = String.format("Unable to find the load balancer with ID [%s].", cmd.getId()); s_logger.error(msg); throw new CloudRuntimeException(msg); } - Map vmServiceState = new HashMap(vmLoadBalancerMappings.size()); - List appliedInstanceIdList = new ArrayList(); - if ((vmLoadBalancerMappings != null) && !vmLoadBalancerMappings.isEmpty()) { - for (LoadBalancerVMMapVO vmLoadBalancerMapping : vmLoadBalancerMappings) { - appliedInstanceIdList.add(vmLoadBalancerMapping.getInstanceId()); - vmServiceState.put(vmLoadBalancerMapping.getInstanceId(), vmLoadBalancerMapping.getState()); - } + String loadBalancerAsString = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(loadBalancer, "uuid", "name"); + + _accountMgr.checkAccess(caller, null, true, loadBalancer); + + List loadBalancerInstances = new ArrayList<>(); + List serviceStates = new ArrayList<>(); + List vmLoadBalancerMappings = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); + + if (vmLoadBalancerMappings == null) { + String msg = String.format("Unable to find map of VMs related to load balancer [%s].", loadBalancerAsString); + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } + + Map vmServiceState = new HashMap<>(vmLoadBalancerMappings.size()); + List appliedInstanceIdList = new ArrayList<>(); + + for (LoadBalancerVMMapVO vmLoadBalancerMapping : vmLoadBalancerMappings) { + appliedInstanceIdList.add(vmLoadBalancerMapping.getInstanceId()); + vmServiceState.put(vmLoadBalancerMapping.getInstanceId(), vmLoadBalancerMapping.getState()); } List userVms = _vmDao.listByIds(appliedInstanceIdList); @@ -2364,13 +2369,25 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements continue; } + String userVmAsString = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(userVm, "uuid", "name"); + boolean isApplied = appliedInstanceIdList.contains(userVm.getId()); - if ((isApplied && applied) || (!isApplied && !applied)) { - loadBalancerInstances.add(userVm); - serviceStates.add(vmServiceState.get(userVm.getId())); + String isAppliedMsg = isApplied ? "is applied" : "is not applied"; + s_logger.debug(String.format("The user VM [%s] %s to a rule of the load balancer [%s].", userVmAsString, isAppliedMsg, loadBalancerAsString)); + + if (isApplied != applied) { + s_logger.debug(String.format("Skipping adding service state from the user VM [%s] to the service state list. 
This happens because the VM %s to the load " + + "balancer rule and the [%s] parameter was passed as [%s].", userVmAsString, isAppliedMsg, ApiConstants.APPLIED, applied)); + continue; } + + loadBalancerInstances.add(userVm); + String serviceState = vmServiceState.get(userVm.getId()); + s_logger.debug(String.format("Adding the service state [%s] from the user VM [%s] to the service state list.", serviceState, userVmAsString)); + serviceStates.add(serviceState); } - return new Pair, List>(loadBalancerInstances, serviceStates); + + return new Pair<>(loadBalancerInstances, serviceStates); } @Override From 231a9eae2eb606def265af54c4eaa8fb8c7fc526 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 8 Dec 2023 21:38:35 +0530 Subject: [PATCH 08/22] ui: add action to declare/cancel host as degraded (#8327) --- ui/public/locales/en.json | 2 ++ ui/src/config/section/infra/hosts.js | 20 ++++++++++++++++++++ ui/src/core/lazy_lib/icons_use.js | 4 ++++ 3 files changed, 26 insertions(+) diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 69a794079b5..67351fd21b7 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -404,6 +404,7 @@ "label.cachemode": "Write-cache type", "label.cancel": "Cancel", "label.cancelmaintenance": "Cancel maintenance", +"label.cancel.host.as.degraded": "Cancel host as degraded", "label.capacity": "Capacity", "label.capacitybytes": "Capacity bytes", "label.capacityiops": "IOPS total", @@ -570,6 +571,7 @@ "label.db.usage.metrics": "DB/Usage server", "label.dbislocal": "The db runs locally", "label.dc.name": "DC name", +"label.declare.host.as.degraded": "Declare host as degraded", "label.decline.invitation": "Decline invitation", "label.dedicate": "Dedicate", "label.dedicate.cluster": "Dedicate cluster", diff --git a/ui/src/config/section/infra/hosts.js b/ui/src/config/section/infra/hosts.js index 9f2c6292052..cb0de90b26c 100644 --- a/ui/src/config/section/infra/hosts.js +++ b/ui/src/config/section/infra/hosts.js @@ -290,6 +290,26 @@ export default { } } }, + { + api: 'declareHostAsDegraded', + icon: 'exception-outlined', + label: 'label.declare.host.as.degraded', + message: 'label.declare.host.as.degraded', + dataView: true, + show: (record) => { + return record.resourcestate !== 'Degraded' && (record.state === 'Alert' || record.state === 'Disconnected') + } + }, + { + api: 'cancelHostAsDegraded', + icon: 'file-done-outlined', + label: 'label.cancel.host.as.degraded', + message: 'label.cancel.host.as.degraded', + dataView: true, + show: (record) => { + return record.resourcestate === 'Degraded' + } + }, { api: 'deleteHost', icon: 'delete-outlined', diff --git a/ui/src/core/lazy_lib/icons_use.js b/ui/src/core/lazy_lib/icons_use.js index ec2d67deaf9..bbda90e3bef 100644 --- a/ui/src/core/lazy_lib/icons_use.js +++ b/ui/src/core/lazy_lib/icons_use.js @@ -72,10 +72,12 @@ import { DragOutlined, EditOutlined, EnvironmentOutlined, + ExceptionOutlined, ExclamationCircleOutlined, EyeInvisibleOutlined, EyeOutlined, FieldTimeOutlined, + FileDoneOutlined, FileProtectOutlined, FilterOutlined, FilterTwoTone, @@ -226,10 +228,12 @@ export default { app.component('DragOutlined', DragOutlined) app.component('EditOutlined', EditOutlined) app.component('EnvironmentOutlined', EnvironmentOutlined) + app.component('ExceptionOutlined', ExceptionOutlined) app.component('ExclamationCircleOutlined', ExclamationCircleOutlined) app.component('EyeInvisibleOutlined', EyeInvisibleOutlined) app.component('EyeOutlined', EyeOutlined) app.component('FieldTimeOutlined', 
FieldTimeOutlined) + app.component('FileDoneOutlined', FileDoneOutlined) app.component('FileProtectOutlined', FileProtectOutlined) app.component('FilterOutlined', FilterOutlined) app.component('FilterTwoTone', FilterTwoTone) From 1031c31e6aa5b53388ddd0033365b27d29e9197e Mon Sep 17 00:00:00 2001 From: Rene Glover Date: Sat, 9 Dec 2023 00:01:33 -0600 Subject: [PATCH 09/22] FiberChannel Multipath for KVM + Pure Flash Array and HPE-Primera Support (#7889) This PR provides a new primary storage volume type called "FiberChannel" that allows access to volumes connected to hosts over fiber channel connections. It requires Multipath to provide path discovery and failover. Second, the PR adds an AdaptivePrimaryDatastoreProvider that abstracts how volumes are managed/orchestrated from the connector to communicate with the primary storage provider, using a ProviderAdapter interface, allowing the code interacting with the primary storage provider API's to be simpler and have no direct dependencies on Cloudstack code. Lastly, the PR provides an implementation of the ProviderAdapter classes for the HP Enterprise Primera line of storage solutions and the Pure Flash Array line of storage solutions. --- .../main/java/com/cloud/storage/Storage.java | 10 +- .../admin/storage/UpdateStoragePoolCmd.java | 32 + client/pom.xml | 10 + .../cloud/vm/VirtualMachineManagerImpl.java | 6 + .../motion/AncientDataMotionStrategy.java | 29 +- .../StorageSystemDataMotionStrategy.java | 264 ++-- .../storage/volume/VolumeServiceImpl.java | 53 +- .../acl/ProjectRoleBasedApiAccessChecker.java | 8 +- .../LibvirtMigrateVolumeCommandWrapper.java | 4 + .../LibvirtResizeVolumeCommandWrapper.java | 10 + .../kvm/storage/FiberChannelAdapter.java | 88 ++ .../kvm/storage/KVMStorageProcessor.java | 37 +- .../kvm/storage/MultipathSCSIAdapterBase.java | 758 ++++++++++++ .../kvm/storage/MultipathSCSIPool.java | 241 ++++ plugins/pom.xml | 3 + plugins/storage/volume/adaptive/README.md | 58 + plugins/storage/volume/adaptive/pom.xml | 62 + .../datastore/adapter/ProviderAdapter.java | 157 +++ .../adapter/ProviderAdapterConstants.java | 22 + .../adapter/ProviderAdapterContext.java | 83 ++ .../adapter/ProviderAdapterDataObject.java | 159 +++ .../adapter/ProviderAdapterDiskOffering.java | 194 +++ .../adapter/ProviderAdapterFactory.java | 24 + .../datastore/adapter/ProviderSnapshot.java | 28 + .../datastore/adapter/ProviderVolume.java | 40 + .../adapter/ProviderVolumeNamer.java | 58 + .../adapter/ProviderVolumeStats.java | 55 + .../adapter/ProviderVolumeStorageStats.java | 71 ++ .../driver/AdaptiveDataStoreDriverImpl.java | 901 ++++++++++++++ .../AdaptiveDataStoreLifeCycleImpl.java | 407 ++++++ ...tivePrimaryDatastoreAdapterFactoryMap.java | 134 ++ .../AdaptivePrimaryDatastoreProviderImpl.java | 86 ++ .../provider/AdaptivePrimaryHostListener.java | 83 ++ plugins/storage/volume/flasharray/pom.xml | 52 + .../adapter/flasharray/FlashArrayAdapter.java | 1086 +++++++++++++++++ .../flasharray/FlashArrayAdapterFactory.java | 36 + .../flasharray/FlashArrayApiToken.java | 34 + .../flasharray/FlashArrayConnection.java | 68 ++ .../flasharray/FlashArrayConnectionHost.java | 39 + .../FlashArrayConnectionHostgroup.java | 40 + .../FlashArrayGroupMemberReference.java | 72 ++ .../FlashArrayGroupMemberReferenceList.java | 38 + .../flasharray/FlashArrayHostgroup.java | 58 + .../adapter/flasharray/FlashArrayList.java | 60 + .../adapter/flasharray/FlashArrayPod.java | 66 + .../adapter/flasharray/FlashArrayTag.java | 77 ++ .../adapter/flasharray/FlashArrayTagList.java | 39 
+ .../adapter/flasharray/FlashArrayVolume.java | 253 ++++ .../flasharray/FlashArrayVolumePod.java | 43 + .../flasharray/FlashArrayVolumeSource.java | 47 + .../flasharray/FlashArrayVolumeSpace.java | 122 ++ ...lashArrayPrimaryDatastoreProviderImpl.java | 32 + .../module.properties | 18 + ...ring-storage-volume-flasharray-context.xml | 35 + plugins/storage/volume/primera/pom.xml | 52 + .../adapter/primera/PrimeraAdapter.java | 930 ++++++++++++++ .../primera/PrimeraAdapterFactory.java | 36 + .../datastore/adapter/primera/PrimeraCpg.java | 203 +++ .../primera/PrimeraCpgDiskPattern.java | 35 + .../adapter/primera/PrimeraCpgLDLayout.java | 49 + .../primera/PrimeraCpgPrivateSpaceMiB.java | 54 + .../adapter/primera/PrimeraCpgSAGrowth.java | 40 + .../adapter/primera/PrimeraCpgSAUsage.java | 54 + .../adapter/primera/PrimeraCpgSDGrowth.java | 54 + .../adapter/primera/PrimeraCpgSDUsage.java | 54 + .../adapter/primera/PrimeraCpgUsrUsage.java | 54 + .../adapter/primera/PrimeraHostset.java | 141 +++ .../datastore/adapter/primera/PrimeraKey.java | 35 + .../adapter/primera/PrimeraTaskReference.java | 44 + .../adapter/primera/PrimeraTaskStatus.java | 174 +++ .../adapter/primera/PrimeraVlun.java | 180 +++ .../adapter/primera/PrimeraVlunList.java | 49 + .../adapter/primera/PrimeraVolume.java | 420 +++++++ .../primera/PrimeraVolumeAdminSpace.java | 54 + .../PrimeraVolumeCapacityEfficiency.java | 40 + .../primera/PrimeraVolumeCopyRequest.java | 43 + .../PrimeraVolumeCopyRequestParameters.java | 101 ++ .../adapter/primera/PrimeraVolumeLink.java | 40 + .../primera/PrimeraVolumeLinkList.java | 37 + .../primera/PrimeraVolumePolicies.java | 82 ++ .../primera/PrimeraVolumePromoteRequest.java | 57 + .../adapter/primera/PrimeraVolumeRequest.java | 110 ++ .../PrimeraVolumeRevertSnapshotRequest.java | 50 + .../primera/PrimeraVolumeSnapshotRequest.java | 43 + ...rimeraVolumeSnapshotRequestParameters.java | 85 ++ .../primera/PrimeraVolumeSnapshotSpace.java | 54 + .../primera/PrimeraVolumeUpdateRequest.java | 35 + .../primera/PrimeraVolumeUserSpace.java | 54 + .../PrimeraPrimaryDatastoreProviderImpl.java | 32 + .../storage-volume-primera/module.properties | 18 + .../spring-storage-volume-primera-context.xml | 35 + scripts/storage/multipath/cleanStaleMaps.sh | 31 + scripts/storage/multipath/connectVolume.sh | 133 ++ scripts/storage/multipath/copyVolume.sh | 32 + scripts/storage/multipath/disconnectVolume.sh | 71 ++ scripts/storage/multipath/resizeVolume.sh | 70 ++ .../main/java/com/cloud/api/ApiDBUtils.java | 12 +- .../ParamGenericValidationWorker.java | 4 +- .../java/com/cloud/server/StatsCollector.java | 6 +- .../com/cloud/storage/StorageManagerImpl.java | 44 +- .../cloud/storage/VolumeApiServiceImpl.java | 4 +- ui/public/locales/en.json | 4 + ui/src/views/infra/AddPrimaryStorage.vue | 89 +- 103 files changed, 10465 insertions(+), 153 deletions(-) create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIPool.java create mode 100644 plugins/storage/volume/adaptive/README.md create mode 100644 plugins/storage/volume/adaptive/pom.xml create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java create mode 100644 
plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterConstants.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterContext.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDataObject.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDiskOffering.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderSnapshot.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolume.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStats.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStorageStats.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java create mode 100644 plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java create mode 100644 plugins/storage/volume/flasharray/pom.xml create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayApiToken.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnection.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHost.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHostgroup.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReference.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReferenceList.java create mode 100644 
plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHostgroup.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayList.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayPod.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTag.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTagList.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSource.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSpace.java create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/provider/FlashArrayPrimaryDatastoreProviderImpl.java create mode 100644 plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/module.properties create mode 100644 plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/spring-storage-volume-flasharray-context.xml create mode 100644 plugins/storage/volume/primera/pom.xml create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpg.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgDiskPattern.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgLDLayout.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgPrivateSpaceMiB.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSAGrowth.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSAUsage.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSDGrowth.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSDUsage.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgUsrUsage.java create mode 100644 
plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraKey.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraTaskReference.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraTaskStatus.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlun.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlunList.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolume.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeAdminSpace.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCapacityEfficiency.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequest.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLink.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLinkList.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePolicies.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRequest.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRevertSnapshotRequest.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequest.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequestParameters.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotSpace.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUpdateRequest.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUserSpace.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/provider/PrimeraPrimaryDatastoreProviderImpl.java create mode 100644 plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/module.properties create mode 100644 
plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/spring-storage-volume-primera-context.xml create mode 100644 scripts/storage/multipath/cleanStaleMaps.sh create mode 100644 scripts/storage/multipath/connectVolume.sh create mode 100644 scripts/storage/multipath/copyVolume.sh create mode 100644 scripts/storage/multipath/disconnectVolume.sh create mode 100644 scripts/storage/multipath/resizeVolume.sh diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index 1ee7200a313..8a2ec1a8905 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -77,13 +77,18 @@ public class Storage { } public static enum Capability { - HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"); + HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"), + ALLOW_MIGRATE_OTHER_POOLS("ALLOW_MIGRATE_OTHER_POOLS"); private final String capability; private Capability(String capability) { this.capability = capability; } + + public String toString() { + return this.capability; + } } public static enum ProvisioningType { @@ -150,7 +155,8 @@ public class Storage { ManagedNFS(true, false, false), Linstor(true, true, false), DatastoreCluster(true, true, false), // for VMware, to abstract pool of clusters - StorPool(true, true, true); + StorPool(true, true, true), + FiberChannel(true, true, false); // Fiber Channel Pool for KVM hypervisors is used to find the volume by WWN value (/dev/disk/by-id/wwn-) private final boolean shared; private final boolean overprovisioning; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java index 09ec5394921..7a907e0f76a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.api.command.admin.storage; import java.util.List; +import java.util.Map; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.log4j.Logger; @@ -32,6 +33,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import com.cloud.storage.StoragePool; import com.cloud.user.Account; +@SuppressWarnings("rawtypes") @APICommand(name = "updateStoragePool", description = "Updates a storage pool.", responseObject = StoragePoolResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateStoragePoolCmd extends BaseCmd { @@ -61,6 +63,20 @@ public class UpdateStoragePoolCmd extends BaseCmd { " enable it back.") private Boolean enabled; + @Parameter(name = ApiConstants.DETAILS, + type = CommandType.MAP, + required = false, + description = "the details for the storage pool", + since = "4.19.0") + private Map details; + + @Parameter(name = ApiConstants.URL, + type = CommandType.STRING, + required = false, + description = "the URL of the storage pool", + since = "4.19.0") + private String url; + @Parameter(name = ApiConstants.IS_TAG_A_RULE, type = CommandType.BOOLEAN, description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE) private Boolean isTagARule; @@ -115,6 +131,22 @@ public class UpdateStoragePoolCmd extends BaseCmd { return ApiCommandResourceType.StoragePool; } + public Map getDetails() { + return details; + } + + public void setDetails(Map details) { + 
this.details = details; + } + + public String getUrl() { + return url; + } + + public void setUrl(String url) { + this.url = url; + } + @Override public void execute() { StoragePool result = _storageService.updateStoragePool(this); diff --git a/client/pom.xml b/client/pom.xml index 0451e8e09e8..a7665e8e3e8 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -111,6 +111,16 @@ cloud-plugin-storage-volume-storpool ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-primera + ${project.version} + + + org.apache.cloudstack + cloud-plugin-storage-volume-flasharray + ${project.version} + org.apache.cloudstack cloud-server diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index b2eaaf7ea6e..3d107278eb7 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -2957,6 +2957,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac *
*
*   • If the current storage pool of the volume is not a managed storage, we do not need to validate anything here.
*   • If the current storage pool is a managed storage and the target storage pool ID is different from the current one, we throw an exception.
+ *   • If the current storage pool is a managed storage and explicitly declares that it is capable of migration to alternate storage pools, the migration is allowed.
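*
*   A minimal illustration of how the third case is expected to be enabled (the updateDetails
*   call is assumed from the pool detail DAO this class already uses; the "true" value is what
*   the Boolean.parseBoolean check below accepts):
*
*     Map<String, String> details = _storagePoolDao.getDetails(currentPool.getId());
*     details.put(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString(), "true");
*     _storagePoolDao.updateDetails(currentPool.getId(), details);
*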
*/ protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) { @@ -2966,6 +2967,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (currentPool.getId() == targetPool.getId()) { return; } + + Map details = _storagePoolDao.getDetails(currentPool.getId()); + if (details != null && Boolean.parseBoolean(details.get(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString()))) { + return; + } throw new CloudRuntimeException(String.format("Currently, a volume on managed storage can only be 'migrated' to itself " + "[volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].", volume.getUuid(), currentPool.getUuid(), targetPool.getUuid())); } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index e450addb261..370753ed923 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -193,7 +193,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { destData.getType() == DataObjectType.TEMPLATE)) { // volume transfer from primary to secondary. Volume transfer between primary pools are already handled by copyVolumeBetweenPools // Delete cache in order to certainly transfer a latest image. - s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId + + if (s_logger.isDebugEnabled()) s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.deleteCacheObject(srcForCopy); } else { @@ -205,7 +205,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { ", uuid: " + cacheUuid + ")"); cacheMgr.deleteCacheObject(srcForCopy); } else { - s_logger.debug("Decrease reference count of " + cacheType + + if (s_logger.isDebugEnabled()) s_logger.debug("Decrease reference count of " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.releaseCacheObject(srcForCopy); } @@ -213,7 +213,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("copy object failed: ", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy object failed: ", e); if (cacheData != null) { cacheMgr.deleteCacheObject(cacheData); } @@ -331,7 +331,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("Failed to send to storage pool", e); + if (s_logger.isDebugEnabled()) s_logger.debug("Failed to send to storage pool", e); throw new CloudRuntimeException("Failed to send to storage pool", e); } } @@ -388,7 +388,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { if (answer == null || !answer.getResult()) { if (answer != null) { - s_logger.debug("copy to image store failed: " + answer.getDetails()); + if (s_logger.isDebugEnabled()) s_logger.debug("copy to image store failed: " + answer.getDetails()); } objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); @@ -411,7 +411,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { if (answer == null || !answer.getResult()) { if (answer != null) { - s_logger.debug("copy to primary 
store failed: " + answer.getDetails()); + if (s_logger.isDebugEnabled()) s_logger.debug("copy to primary store failed: " + answer.getDetails()); } objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); @@ -471,13 +471,17 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { s_logger.error(errMsg); answer = new Answer(command, false, errMsg); } else { + if (s_logger.isDebugEnabled()) s_logger.debug("Sending MIGRATE_COPY request to node " + ep); answer = ep.sendMessage(command); + if (s_logger.isDebugEnabled()) s_logger.debug("Received MIGRATE_COPY response from node with answer: " + answer); } if (answer == null || !answer.getResult()) { throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool); } else { // Update the volume details after migration. + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY updating volume"); + VolumeVO volumeVo = volDao.findById(volume.getId()); Long oldPoolId = volume.getPoolId(); volumeVo.setPath(((MigrateVolumeAnswer)answer).getVolumePath()); @@ -496,6 +500,8 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } volumeVo.setFolder(folder); volDao.update(volume.getId(), volumeVo); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY update volume data complete"); + } return answer; @@ -507,7 +513,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { Answer answer = null; String errMsg = null; try { - s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); + if (s_logger.isDebugEnabled()) s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) { answer = copyVolumeFromSnapshot(srcData, destData); } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) { @@ -516,11 +522,16 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { answer = cloneVolume(srcData, destData); } else if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary && destData.getDataStore().getRole() == DataStoreRole.Primary) { + if (s_logger.isDebugEnabled()) s_logger.debug("About to MIGRATE copy between datasources"); if (srcData.getId() == destData.getId()) { // The volume has to be migrated across storage pools. 
+ if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool STARTING"); answer = migrateVolumeToPool(srcData, destData); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool DONE: " + answer.getResult()); } else { + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools STARTING"); answer = copyVolumeBetweenPools(srcData, destData); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools DONE: " + answer.getResult()); } } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) { answer = copySnapshot(srcData, destData); @@ -532,7 +543,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { errMsg = answer.getDetails(); } } catch (Exception e) { - s_logger.debug("copy failed", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy failed", e); errMsg = e.toString(); } CopyCommandResult result = new CopyCommandResult(null, answer); @@ -627,7 +638,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("copy snasphot failed: ", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy snasphot failed: ", e); if (cacheData != null) { cacheMgr.deleteCacheObject(cacheData); } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 1419ae36d25..b24452336bd 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -106,6 +106,7 @@ import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; @@ -186,6 +187,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private EndPointSelector selector; @Inject VMTemplatePoolDao templatePoolDao; + @Inject + private VolumeDataFactory _volFactory; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { @@ -400,15 +403,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } else if (!isVolumeOnManagedStorage(destVolumeInfo)) { handleVolumeMigrationFromManagedStorageToNonManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } else { - String errMsg = "The source volume to migrate and the destination volume are both on managed storage. 
" + - "Migration in this case is not yet supported."; - - handleError(errMsg, callback); + handleVolumeMigrationFromManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } } else if (!isVolumeOnManagedStorage(destVolumeInfo)) { - String errMsg = "The 'StorageSystemDataMotionStrategy' does not support this migration use case."; - - handleError(errMsg, callback); + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { + String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString()); + handleError(errMsg, callback); + } else { + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); + } } else { handleVolumeMigrationFromNonManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } @@ -453,7 +456,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { String volumePath = null; try { - if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) { + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " + "from managed storage to non-managed storage."); } @@ -485,7 +488,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeCopyFromManagedStorageToSecondaryStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -512,12 +515,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } } + private void handleVolumeMigrationFromManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, + AsyncCompletionCallback callback) { + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { + String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString()); + handleError(errMsg, callback); + } else { + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); + } + } + private void handleVolumeMigrationFromManagedStorageToNonManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { String errMsg = null; try { - if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) { + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " + "from managed storage to non-managed storage."); } @@ -525,10 +538,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { HypervisorType hypervisorType = HypervisorType.KVM; VirtualMachine vm = srcVolumeInfo.getAttachedVM(); - if (vm != null && vm.getState() != VirtualMachine.State.Stopped) { - throw new CloudRuntimeException("Currently, if a volume to migrate from managed storage to non-managed storage is attached to " + - "a VM, the VM must be in the Stopped state."); - } + checkAvailableForMigration(vm); long destStoragePoolId = destVolumeInfo.getPoolId(); StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(destStoragePoolId); @@ -553,7 +563,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { errMsg = "Migration operation 
failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromManagedStorageToNonManagedStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -579,9 +589,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private void verifyFormatWithPoolType(ImageFormat imageFormat, StoragePoolType poolType) { if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2 && - !(imageFormat == ImageFormat.RAW && StoragePoolType.PowerFlex == poolType)) { - throw new CloudRuntimeException("Only the following image types are currently supported: " + - ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", " + ImageFormat.QCOW2.toString() + ", and " + ImageFormat.RAW.toString() + "(for PowerFlex)"); + !(imageFormat == ImageFormat.RAW && (StoragePoolType.PowerFlex == poolType || + StoragePoolType.FiberChannel == poolType))) { + throw new CloudRuntimeException(String.format("Only the following image types are currently supported: %s, %s, %s, %s (for PowerFlex and FiberChannel)", + ImageFormat.VHD.toString(), ImageFormat.OVA.toString(), ImageFormat.QCOW2.toString(), ImageFormat.RAW.toString())); } } @@ -685,14 +696,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo); } else { - handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo); + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); } } catch (Exception ex) { errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -826,24 +837,73 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { _volumeDao.update(srcVolumeInfo.getId(), volumeVO); } - private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { + private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { VirtualMachine vm = srcVolumeInfo.getAttachedVM(); - if (vm != null && vm.getState() != VirtualMachine.State.Stopped) { - throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " + - "a VM, the VM must be in the Stopped state."); + checkAvailableForMigration(vm); + + String errMsg = null; + try { + destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); + VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + updatePathFromScsiName(volumeVO); + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + + // migrate the volume via the hypervisor + String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); + + updateVolumePath(destVolumeInfo.getId(), path); + volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + // only set this if it was not set. 
default to QCOW2 for KVM + if (volumeVO.getFormat() == null) { + volumeVO.setFormat(ImageFormat.QCOW2); + _volumeDao.update(volumeVO.getId(), volumeVO); + } + } catch (Exception ex) { + errMsg = "Primary storage migration failed due to an unexpected error: " + + ex.getMessage(); + if (ex instanceof CloudRuntimeException) { + throw ex; + } else { + throw new CloudRuntimeException(errMsg, ex); + } + } finally { + CopyCmdAnswer copyCmdAnswer; + if (errMsg != null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } + else { + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + DataTO dataTO = destVolumeInfo.getTO(); + copyCmdAnswer = new CopyCmdAnswer(dataTO); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(errMsg); + callback.complete(result); } + } - destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); + private void checkAvailableForMigration(VirtualMachine vm) { + if (vm != null && (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Migrating)) { + throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " + + "a VM, the VM must be in the Stopped or Migrating state."); + } + } - VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); - - volumeVO.setPath(volumeVO.get_iScsiName()); - - _volumeDao.update(volumeVO.getId(), volumeVO); - - destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + /** + * Only update the path from the iscsiName if the iscsiName is set. Otherwise take no action to avoid nullifying the path + * with a previously set path value. 
+ */ + private void updatePathFromScsiName(VolumeVO volumeVO) { + if (volumeVO.get_iScsiName() != null) { + volumeVO.setPath(volumeVO.get_iScsiName()); + _volumeDao.update(volumeVO.getId(), volumeVO); + } + } + private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { long srcStoragePoolId = srcVolumeInfo.getPoolId(); StoragePoolVO srcStoragePoolVO = _storagePoolDao.findById(srcStoragePoolId); @@ -856,14 +916,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); } - // migrate the volume via the hypervisor - migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); - - volumeVO = _volumeDao.findById(destVolumeInfo.getId()); - - volumeVO.setFormat(ImageFormat.QCOW2); - - _volumeDao.update(volumeVO.getId(), volumeVO); + return hostVO; } /** @@ -1075,7 +1128,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { catch (Exception ex) { errMsg = ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { if (usingBackendSnapshot) { @@ -1293,7 +1346,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { catch (Exception ex) { errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateManagedVolumeFromNonManagedSnapshot': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); @@ -1674,6 +1727,42 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { return copyCmdAnswer; } + /** + * Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the snapshot) + + * @param volumeVO + * @param snapshotInfo + */ + public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) { + VolumeVO volumeVO = null; + try { + volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", + snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); + volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + _volumeDao.persist(volumeVO); + VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + + if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { + snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); + // refresh volume info as data could have changed + tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + // save the "temp" volume info into the snapshot details (we need this to clean up at the end) + _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true); + _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true); + // NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO() + // whenever the TemporaryVolumeCopyPath is set. 
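// A minimal sketch of that driver-side contract (kept in comment form; the enclosing driver
// class is storage-specific and therefore an assumption here, while findDetail and
// SnapshotObjectTO.setPath are used elsewhere in this patch):
//
//   @Override
//   public DataTO getTO() {
//       SnapshotObjectTO to = (SnapshotObjectTO) super.getTO();
//       SnapshotDetailsVO copyPath = snapshotDetailsDao.findDetail(getId(), "TemporaryVolumeCopyPath");
//       if (copyPath != null && copyPath.getValue() != null) {
//           to.setPath(copyPath.getValue()); // point the TO at the temporary volume copy
//       }
//       return to;
//   }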
+ } else { + throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); + } + } catch (Throwable e) { + // cleanup temporary volume + if (volumeVO != null) { + _volumeDao.remove(volumeVO.getId()); + } + throw e; + } + } + /** * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. @@ -1685,8 +1774,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. */ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); + if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { + prepTempVolumeForCopyFromSnapshot(snapshotInfo); + return; + } + + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); try { snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); } @@ -1701,6 +1795,24 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * invocation of createVolumeFromSnapshot(SnapshotInfo). */ private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { + VolumeVO volumeVO = null; + // cleanup any temporary volume previously created for copy from a snapshot + if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { + SnapshotDetailsVO tempUuid = null; + tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); + if (tempUuid == null || tempUuid.getValue() == null) { + return; + } + + volumeVO = _volumeDao.findByUuid(tempUuid.getValue()); + if (volumeVO != null) { + _volumeDao.remove(volumeVO.getId()); + } + _snapshotDetailsDao.remove(tempUuid.getId()); + _snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); + return; + } + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); try { @@ -2363,7 +2475,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { try { StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId()); - if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && !(ImageFormat.RAW.equals(volumeInfo.getFormat()) && StoragePoolType.PowerFlex == storagePoolVO.getPoolType())) { + if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && + !(ImageFormat.RAW.equals(volumeInfo.getFormat()) && ( + StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || + StoragePoolType.FiberChannel == storagePoolVO.getPoolType()))) { throw new CloudRuntimeException("When using managed storage, you can only create a template from a volume on KVM currently."); } @@ -2506,7 +2621,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { long snapshotId = snapshotInfo.getId(); - if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) { + // if the snapshot required a temporary volume be created check if the UUID is set so we can + // retrieve the temporary volume's path to use during remote copy + List storedDetails = 
_snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath"); + if (storedDetails != null && storedDetails.size() > 0) { + String value = storedDetails.get(0).getValue(); + snapshotDetails.put(DiskTO.PATH, value); + } else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath()); } else { snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); @@ -2718,8 +2839,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } private String migrateVolumeForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, HostVO hostVO, String errMsg) { - boolean srcVolumeDetached = srcVolumeInfo.getAttachedVM() == null; - try { Map srcDetails = getVolumeDetails(srcVolumeInfo); Map destDetails = getVolumeDetails(destVolumeInfo); @@ -2727,16 +2846,11 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(), srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value()); - if (srcVolumeDetached) { - _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); - } - + _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); MigrateVolumeAnswer migrateVolumeAnswer = (MigrateVolumeAnswer)agentManager.send(hostVO.getId(), migrateVolumeCommand); - if (migrateVolumeAnswer == null || !migrateVolumeAnswer.getResult()) { if (migrateVolumeAnswer != null && StringUtils.isNotEmpty(migrateVolumeAnswer.getDetails())) { throw new CloudRuntimeException(migrateVolumeAnswer.getDetails()); @@ -2745,42 +2859,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new CloudRuntimeException(errMsg); } } - - if (srcVolumeDetached) { - _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); - } - - try { - _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); - } - catch (Exception e) { - // This volume should be deleted soon, so just log a warning here. - LOGGER.warn(e.getMessage(), e); - } - return migrateVolumeAnswer.getVolumePath(); - } - catch (Exception ex) { + } catch (CloudRuntimeException ex) { + throw ex; + } catch (Exception ex) { + throw new CloudRuntimeException("Unexpected error during volume migration: " + ex.getMessage(), ex); + } finally { try { - _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); - } - catch (Exception e) { - // This volume should be deleted soon, so just log a warning here. 
- LOGGER.warn(e.getMessage(), e); - } - - if (srcVolumeDetached) { _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); + } catch (Throwable e) { + LOGGER.warn("During cleanup post-migration and exception occured: " + e); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Exception during post-migration cleanup.", e); + } } - - String msg = "Failed to perform volume migration : "; - - LOGGER.warn(msg, ex); - - throw new CloudRuntimeException(msg + ex.getMessage(), ex); - } - finally { - handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 47577cc52b2..c0ef227251c 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -882,9 +882,7 @@ public class VolumeServiceImpl implements VolumeService { */ private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, PrimaryDataStore destPrimaryDataStore) { // create a template volume on primary storage - AsyncCallFuture createTemplateFuture = new AsyncCallFuture<>(); TemplateInfo templateOnPrimary = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo, srcTemplateInfo.getDeployAsIsConfiguration()); - VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), srcTemplateInfo.getDeployAsIsConfiguration()); if (templatePoolRef == null) { @@ -897,7 +895,6 @@ public class VolumeServiceImpl implements VolumeService { // At this point, we have an entry in the DB that points to our cached template. // We need to lock it as there may be other VMs that may get started using the same template. // We want to avoid having to create multiple cache copies of the same template. 
- int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); long templatePoolRefId = templatePoolRef.getId(); @@ -909,28 +906,27 @@ public class VolumeServiceImpl implements VolumeService { try { // create a cache volume on the back-end - templateOnPrimary.processEvent(Event.CreateOnlyRequested); + CreateAsyncCompleteCallback callback = new CreateAsyncCompleteCallback(); - CreateVolumeContext createContext = new CreateVolumeContext<>(null, templateOnPrimary, createTemplateFuture); - AsyncCallbackDispatcher createCaller = AsyncCallbackDispatcher.create(this); - - createCaller.setCallback(createCaller.getTarget().createManagedTemplateImageCallback(null, null)).setContext(createContext); - - destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, createCaller); - - VolumeApiResult result = createTemplateFuture.get(); - - if (result.isFailed()) { - String errMesg = result.getResult(); - + destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, callback); + // validate we got a good result back + if (callback.result == null || callback.result.isFailed()) { + String errMesg; + if (callback.result == null) { + errMesg = "Unknown/unable to determine result"; + } else { + errMesg = callback.result.getResult(); + } + templateOnPrimary.processEvent(Event.OperationFailed); throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg); } + + templateOnPrimary.processEvent(Event.OperationSuccessed); + } catch (Throwable e) { s_logger.debug("Failed to create template volume on storage", e); - templateOnPrimary.processEvent(Event.OperationFailed); - throw new CloudRuntimeException(e.getMessage()); } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); @@ -939,6 +935,17 @@ public class VolumeServiceImpl implements VolumeService { return templateOnPrimary; } + private static class CreateAsyncCompleteCallback implements AsyncCompletionCallback { + + public CreateCmdResult result; + + @Override + public void complete(CreateCmdResult result) { + this.result = result; + } + + } + /** * This function copies a template from secondary storage to a template volume * created on managed storage. This template volume will be used as a cache. 
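The CreateAsyncCompleteCallback above records the driver's CreateCmdResult in a plain field, which is sound only while every driver completes the callback before createAsync returns. A latch-based variant along these lines (a sketch under that assumption, not part of this patch) would also tolerate a driver completing the callback from another thread:

    private static class BlockingCreateCallback implements AsyncCompletionCallback<CreateCmdResult> {
        private final java.util.concurrent.CountDownLatch latch = new java.util.concurrent.CountDownLatch(1);
        private volatile CreateCmdResult result;

        @Override
        public void complete(CreateCmdResult result) {
            this.result = result;
            latch.countDown(); // wake any thread blocked in awaitResult()
        }

        public CreateCmdResult awaitResult(long timeoutSecs) throws InterruptedException {
            latch.await(timeoutSecs, java.util.concurrent.TimeUnit.SECONDS);
            return result; // null when the driver never completed within the timeout
        }
    }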
@@ -1464,6 +1471,16 @@ public class VolumeServiceImpl implements VolumeService { if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost); } + } catch (Exception e) { + if (templateOnPrimary != null) { + templateOnPrimary.processEvent(Event.OperationFailed); + } + VolumeApiResult result = new VolumeApiResult(volumeInfo); + result.setResult(e.getLocalizedMessage()); + result.setSuccess(false); + future.complete(result); + s_logger.warn("Failed to create template on primary storage", e); + return future; } finally { if (lock != null) { lock.unlock(); diff --git a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 9363ebd2379..0306a062df9 100644 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@ -61,7 +61,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP @Override public boolean isEnabled() { if (!roleService.isEnabled()) { - LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker."); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker."); + } } return roleService.isEnabled(); } @@ -119,7 +121,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP Account userAccount = accountService.getAccount(user.getAccountId()); if (accountService.isRootAdmin(userAccount.getId()) || accountService.isDomainAdmin(userAccount.getAccountId())) { - LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName())); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName())); + } return true; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java index 5c893e5d12f..2a09c340891 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java @@ -279,6 +279,10 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper srcDetails = command.getSrcDetails(); String srcPath = srcDetails != null ? srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath(); + // its possible a volume has details but is not using IQN addressing... 
+ if (srcPath == null) { + srcPath = srcVolumeObjectTO.getPath(); + } VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData(); PrimaryDataStoreTO destPrimaryDataStore = (PrimaryDataStoreTO)destVolumeObjectTO.getDataStore(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java index 36ff69d83af..4f1ad728b5d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java @@ -50,6 +50,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; import com.cloud.hypervisor.kvm.storage.KVMStoragePool; import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.hypervisor.kvm.storage.MultipathSCSIPool; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Storage.StoragePoolType; @@ -84,6 +85,10 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper; connid= + String type = null; + String address = null; + String connectionId = null; + String path = null; + String[] parts = inPath.split(";"); + // handle initial code of wwn only + if (parts.length == 1) { + type = "FIBERWWN"; + address = parts[0]; + } else { + for (String part: parts) { + String[] pair = part.split("="); + if (pair.length == 2) { + String key = pair[0].trim(); + String value = pair[1].trim(); + if (key.equals("type")) { + type = value.toUpperCase(); + } else if (key.equals("address")) { + address = value; + } else if (key.equals("connid")) { + connectionId = value; + } + } + } + } + + if ("FIBERWWN".equals(type)) { + path = "/dev/mapper/3" + address; + } else { + throw new CloudRuntimeException("Invalid address type provided for target disk: " + type); + } + + return new AddressInfo(type, address, connectionId, path); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index dd31025d35f..1be4a8b6185 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -290,9 +290,12 @@ public class KVMStorageProcessor implements StorageProcessor { final TemplateObjectTO newTemplate = new TemplateObjectTO(); newTemplate.setPath(primaryVol.getName()); newTemplate.setSize(primaryVol.getSize()); - if (primaryPool.getType() == StoragePoolType.RBD || - primaryPool.getType() == StoragePoolType.PowerFlex || - primaryPool.getType() == StoragePoolType.Linstor) { + + if(List.of( + StoragePoolType.RBD, + StoragePoolType.PowerFlex, + StoragePoolType.Linstor, + StoragePoolType.FiberChannel).contains(primaryPool.getType())) { newTemplate.setFormat(ImageFormat.RAW); } else { newTemplate.setFormat(ImageFormat.QCOW2); @@ -584,7 +587,9 @@ public class KVMStorageProcessor implements StorageProcessor { public Answer createTemplateFromVolume(final CopyCommand cmd) { Map details = cmd.getOptions(); - if (details != null && details.get(DiskTO.IQN) != null) { + // handle cases where 
the managed storage driver had to make a temporary volume from + // the snapshot in order to support the copy + if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) { // use the managed-storage approach return createTemplateFromVolumeOrSnapshot(cmd); } @@ -712,7 +717,7 @@ public class KVMStorageProcessor implements StorageProcessor { public Answer createTemplateFromSnapshot(CopyCommand cmd) { Map details = cmd.getOptions(); - if (details != null && details.get(DiskTO.IQN) != null) { + if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) { // use the managed-storage approach return createTemplateFromVolumeOrSnapshot(cmd); } @@ -750,12 +755,15 @@ public class KVMStorageProcessor implements StorageProcessor { KVMStoragePool secondaryStorage = null; try { + // look for options indicating an overridden path or IQN. Used when snapshots have to be + // temporarily copied on the manaaged storage device before the actual copy to target object Map details = cmd.getOptions(); - - String path = details != null ? details.get(DiskTO.IQN) : null; - + String path = details != null ? details.get(DiskTO.PATH) : null; if (path == null) { - new CloudRuntimeException("The 'path' field must be specified."); + path = details != null ? details.get(DiskTO.IQN) : null; + if (path == null) { + new CloudRuntimeException("The 'path' or 'iqn' field must be specified."); + } } storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details); @@ -2188,7 +2196,16 @@ public class KVMStorageProcessor implements StorageProcessor { Map details = cmd.getOptions2(); - String path = details != null ? details.get(DiskTO.IQN) : null; + String path = cmd.getDestTO().getPath(); + if (path == null) { + path = details != null ? details.get(DiskTO.PATH) : null; + if (path == null) { + path = details != null ? details.get(DiskTO.IQN) : null; + if (path == null) { + new CloudRuntimeException("The 'path' or 'iqn' field must be specified."); + } + } + } storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path, details); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java new file mode 100644 index 00000000000..06dea46a98d --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -0,0 +1,758 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
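// Note on the volume path format consumed by this adapter family: parseAndValidatePath(...)
// declared below accepts either a bare WWN or a semicolon-delimited key/value string, for
// example (the WWN value itself is hypothetical):
//
//     type=FIBERWWN; address=600a0b80002f0f2c0000000000000001; connid=5
//
// which resolves to the multipath device path /dev/mapper/3600a0b80002f0f2c0000000000000001.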
+ +package com.cloud.hypervisor.kvm.storage; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.log4j.Logger; + +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.OutputInterpreter; +import com.cloud.utils.script.Script; +import org.apache.commons.lang3.StringUtils; +import org.libvirt.LibvirtException; +import org.joda.time.Duration; + +public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { + static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class); + static final Map MapStorageUuidToStoragePool = new HashMap<>(); + + /** + * A lock to avoid any possiblity of multiple requests for a scan + */ + static byte[] CLEANUP_LOCK = new byte[0]; + + /** + * Property keys and defaults + */ + static final Property CLEANUP_FREQUENCY_SECS = new Property("multimap.cleanup.frequency.secs", 60); + static final Property CLEANUP_TIMEOUT_SECS = new Property("multimap.cleanup.timeout.secs", 4); + static final Property CLEANUP_ENABLED = new Property("multimap.cleanup.enabled", true); + static final Property CLEANUP_SCRIPT = new Property("multimap.cleanup.script", "cleanStaleMaps.sh"); + static final Property CONNECT_SCRIPT = new Property("multimap.connect.script", "connectVolume.sh"); + static final Property COPY_SCRIPT = new Property("multimap.copy.script", "copyVolume.sh"); + static final Property DISCONNECT_SCRIPT = new Property("multimap.disconnect.script", "disconnectVolume.sh"); + static final Property RESIZE_SCRIPT = new Property("multimap.resize.script", "resizeVolume.sh"); + static final Property DISK_WAIT_SECS = new Property("multimap.disk.wait.secs", 240); + static final Property STORAGE_SCRIPTS_DIR = new Property("multimap.storage.scripts.dir", "scripts/storage/multipath"); + + static Timer cleanupTimer = new Timer(); + private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue(); + private static String connectScript = CONNECT_SCRIPT.getFinalValue(); + private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue(); + private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue(); + private static String resizeScript = RESIZE_SCRIPT.getFinalValue(); + private static String copyScript = COPY_SCRIPT.getFinalValue(); + private static int diskWaitTimeSecs = DISK_WAIT_SECS.getFinalValue(); + + /** + * Initialize static program-wide configurations and background jobs + */ + static { + long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000; + boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue(); + + + connectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), connectScript); + if (connectScript == null) { + throw new Error("Unable to find the connectVolume.sh script"); + } + + disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript); + if (disconnectScript == null) { + throw new 
Error("Unable to find the disconnectVolume.sh script"); + } + + resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); + if (resizeScript == null) { + throw new Error("Unable to find the resizeVolume.sh script"); + } + + copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript); + if (copyScript == null) { + throw new Error("Unable to find the copyVolume.sh script"); + } + + if (cleanupEnabled) { + cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript); + if (cleanupScript == null) { + throw new Error("Unable to find the cleanStaleMaps.sh script and " + CLEANUP_ENABLED.getName() + " is true"); + } + + TimerTask task = new TimerTask() { + @Override + public void run() { + try { + MultipathSCSIAdapterBase.cleanupStaleMaps(); + } catch (Throwable e) { + LOGGER.warn("Error running stale multipath map cleanup", e); + } + } + }; + + cleanupTimer = new Timer("MultipathMapCleanupJob"); + cleanupTimer.scheduleAtFixedRate(task, 0, cleanupFrequency); + } + } + + @Override + public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { + return getStoragePool(uuid); + } + + public abstract String getName(); + + public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type); + + /** + * We expect WWN values in the volumePath so need to convert it to an actual physical path + */ + public abstract AddressInfo parseAndValidatePath(String path); + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { + LOGGER.debug(String.format("getPhysicalDisk(volumePath,pool) called with args (%s,%s)", volumePath, pool)); + + if (StringUtils.isEmpty(volumePath) || pool == null) { + LOGGER.error("Unable to get physical disk, volume path or pool not specified"); + return null; + } + + AddressInfo address = parseAndValidatePath(volumePath); + return getPhysicalDisk(address, pool); + } + + private KVMPhysicalDisk getPhysicalDisk(AddressInfo address, KVMStoragePool pool) { + LOGGER.debug(String.format("getPhysicalDisk(addressInfo,pool) called with args (%s,%s)", address.getPath(), pool)); + KVMPhysicalDisk disk = new KVMPhysicalDisk(address.getPath(), address.toString(), pool); + disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + + long diskSize = getPhysicalDiskSize(address.getPath()); + disk.setSize(diskSize); + disk.setVirtualSize(diskSize); + LOGGER.debug("Physical disk " + disk.getPath() + " with format " + disk.getFormat() + " and size " + disk.getSize() + " provided"); + return disk; + } + + @Override + public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type, Map details) { + LOGGER.info(String.format("createStoragePool(uuid,host,port,path,type) called with args (%s, %s, %s, %s, %s)", uuid, host, ""+port, path, type)); + MultipathSCSIPool storagePool = new MultipathSCSIPool(uuid, host, port, path, type, details, this); + MapStorageUuidToStoragePool.put(uuid, storagePool); + return storagePool; + } + + @Override + public boolean deleteStoragePool(String uuid) { + return MapStorageUuidToStoragePool.remove(uuid) != null; + } + + @Override + public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) { + LOGGER.info("connectPhysicalDisk called for [" + volumePath + "]"); + + if (StringUtils.isEmpty(volumePath)) { + LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined"); + throw new CloudRuntimeException("Unable to connect 
physical disk due to insufficient data - volume path is underfined"); + } + + if (pool == null) { + LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set"); + throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set"); + } + + AddressInfo address = this.parseAndValidatePath(volumePath); + int waitTimeInSec = diskWaitTimeSecs; + if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) { + String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString()); + if (StringUtils.isNotEmpty(waitTime)) { + waitTimeInSec = Integer.valueOf(waitTime).intValue(); + } + } + return waitForDiskToBecomeAvailable(address, pool, waitTimeInSec); + } + + @Override + public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) { + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid())); + AddressInfo address = this.parseAndValidatePath(volumePath); + ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase()); + if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true; + } + + @Override + public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect); + return false; + } + + @Override + public boolean disconnectPhysicalDiskByPath(String localPath) { + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath)); + ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", "")); + if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true; + } + + @Override + public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { + LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString())); + return true; + } + + @Override + public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool) { + LOGGER.info(String.format("createTemplateFromDisk(disk,name,format,size,destPool) called with args (%s, %s, %s, %s, %s) [not implemented]", disk.getPath(), name, format.toString(), ""+size, destPool.getUuid())); + return null; + } + + @Override + public List listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) { + LOGGER.info(String.format("listPhysicalDisks(uuid,pool) called with args (%s, %s) [not implemented]", storagePoolUuid, pool.getUuid())); + return null; + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + return copyPhysicalDisk(disk, name, destPool, timeout, null, null, null); + } + + @Override + public boolean refresh(KVMStoragePool pool) { + LOGGER.info(String.format("refresh(pool) called with args (%s)", 
pool.getUuid())); + return true; + } + + @Override + public boolean deleteStoragePool(KVMStoragePool pool) { + LOGGER.info(String.format("deleteStroagePool(pool) called with args (%s)", pool.getUuid())); + return deleteStoragePool(pool.getUuid()); + } + + @Override + public boolean createFolder(String uuid, String path) { + LOGGER.info(String.format("createFolder(uuid,path) called with args (%s, %s) [not implemented]", uuid, path)); + return createFolder(uuid, path, null); + } + + @Override + public boolean createFolder(String uuid, String path, String localPath) { + LOGGER.info(String.format("createFolder(uuid,path,localPath) called with args (%s, %s, %s) [not implemented]", uuid, path, localPath)); + return true; + } + + /** + * Validate inputs and return the source file for a template copy + * @param templateFilePath + * @param destTemplatePath + * @param destPool + * @param format + * @return + */ + File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) { + if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) { + LOGGER.error("Unable to create template from direct download template file due to insufficient data"); + throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data"); + } + + LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + + File sourceFile = new File(templateFilePath); + if (!sourceFile.exists()) { + throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host"); + } + + if (destTemplatePath == null || destTemplatePath.isEmpty()) { + LOGGER.error("Failed to create template, target template disk path not provided"); + throw new CloudRuntimeException("Target template disk path not provided"); + } + + if (this.isStoragePoolTypeSupported(destPool.getType())) { + throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString()); + } + + if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) { + LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); + throw new CloudRuntimeException("Unsupported template format: " + format.toString()); + } + return sourceFile; + } + + String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) { + String srcTemplateFilePath = templateFilePath; + if (isTemplateExtractable(templateFilePath)) { + srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); + LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); + String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); + Script.runSimpleBashScript(extractCommand); + Script.runSimpleBashScript("rm -f " + templateFilePath); + } + return srcTemplateFilePath; + } + + QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) { + if (format == Storage.ImageFormat.RAW) { + return QemuImg.PhysicalDiskFormat.RAW; + } else if (format == Storage.ImageFormat.QCOW2) { + return QemuImg.PhysicalDiskFormat.QCOW2; + } else { + return QemuImg.PhysicalDiskFormat.RAW; + } + } + + @Override + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool 
destPool, Storage.ImageFormat format, int timeout) { + File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format); + LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath()); + return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN); + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, + byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) { + + validateForDiskCopy(disk, name, destPool); + LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); + + KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); + if (destDisk == null) { + LOGGER.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + throw new CloudRuntimeException("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + } + + if (srcPassphrase != null || dstPassphrase != null) { + throw new CloudRuntimeException("Storage provider does not support user-space encrypted source or destination volumes"); + } + + destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + destDisk.setVirtualSize(disk.getVirtualSize()); + destDisk.setSize(disk.getSize()); + + LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat()); + QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); + QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath()); + ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); + int rc = result.getExitCode(); + if (rc != 0) { + throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult()); + } + LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult()); + + return destDisk; + } + + void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) { + if (StringUtils.isEmpty(name) || disk == null || destPool == null) { + LOGGER.error("Unable to copy physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); + } + } + + /** + * Copy a disk path to another disk path using QemuImg command + * @param disk + * @param destDisk + * @param name + * @param timeout + */ + void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) { + QemuImg qemu; + try { + qemu = new QemuImg(timeout); + } catch (LibvirtException | QemuImgException e) { + throw new CloudRuntimeException (e); + } + QemuImgFile srcFile = null; + QemuImgFile destFile = null; + + try { + srcFile = new QemuImgFile(disk.getPath(), 
disk.getFormat());
+            destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
+
+            LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
+            qemu.convert(srcFile, destFile, true);
+            LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
+        } catch (QemuImgException | LibvirtException e) {
+            try {
+                Map<String, String> srcInfo = qemu.info(srcFile);
+                LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo));
+            } catch (Exception ignored) {
+                LOGGER.warn("Unable to get info from source disk: " + disk.getName());
+            }
+
+            String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage()));
+            LOGGER.error(errMsg);
+            throw new CloudRuntimeException(errMsg, e);
+        }
+    }
+
+    @Override
+    public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
+            String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size,
+            KVMStoragePool destPool, int timeout, byte[] passphrase) {
+        throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplate'");
+    }
+
+    @Override
+    public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template,
+            String name, PhysicalDiskFormat format, long size,
+            KVMStoragePool destPool, int timeout, byte[] passphrase) {
+        throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplateBacking'");
+    }
+
+    @Override
+    public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool,
+            PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) {
+        throw new UnsupportedOperationException("Unimplemented method 'createPhysicalDisk'");
+    }
+
+    boolean isTemplateExtractable(String templatePath) {
+        // run through a shell so the pipe is interpreted; Script arguments are not shell-interpreted
+        String type = Script.runSimpleBashScript("file " + templatePath + " | awk -F' ' '{print $2}'");
+        return type != null && (type.equalsIgnoreCase("bzip2") || type.equalsIgnoreCase("gzip") || type.equalsIgnoreCase("zip"));
+    }
+
+    String getExtractCommandForDownloadedFile(String downloadedTemplateFile, String templateFile) {
+        if (downloadedTemplateFile.endsWith(".zip")) {
+            return "unzip -p " + downloadedTemplateFile + " | cat > " + templateFile;
+        } else if (downloadedTemplateFile.endsWith(".bz2")) {
+            return "bunzip2 -c " + downloadedTemplateFile + " > " + templateFile;
+        } else if (downloadedTemplateFile.endsWith(".gz")) {
+            return "gunzip -c " + downloadedTemplateFile + " > " + templateFile;
+        } else {
+            throw new CloudRuntimeException("Unable to extract template " + downloadedTemplateFile);
+        }
+    }
+
+    private static ScriptResult runScript(String script, long timeout, String... args) {
+        ScriptResult result = new ScriptResult();
+        Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
+        cmd.add(args);
+        OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
+        String output = cmd.execute(parser);
+        // it's possible the process never launches, which causes an NPE on getExitValue below
+        if (output != null && output.contains("Unable to execute the command")) {
+            result.setResult(output);
+            result.setExitCode(-1);
+            return result;
+        }
+        result.setResult(output);
+        result.setExitCode(cmd.getExitValue());
+        return result;
+    }
+
+    boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
+        LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
+        long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
+        long maxTries = 10; // how many max retries to attempt the script
+        int timeBetweenTries = 1000; // how long to sleep between tries
+        // wait at least 60 seconds even if input was lower; clamp before computing the
+        // overall wait time so the minimum actually takes effect
+        if (waitTimeInSec < 60) {
+            waitTimeInSec = 60;
+        }
+        long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
+        KVMPhysicalDisk physicalDisk = null;
+
+        // Rescan before checking for the physical disk
+        int tries = 0;
+        while (waitTimeInMillis > 0 && tries < maxTries) {
+            tries++;
+            long start = System.currentTimeMillis();
+            String lun;
+            if (address.getConnectionId() == null) {
+                lun = "-";
+            } else {
+                lun = address.getConnectionId();
+            }
+
+            Process p = null;
+            try {
+                ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
+                p = builder.start();
+                if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
+                    int rc = p.exitValue();
+                    StringBuffer output = new StringBuffer();
+                    if (rc == 0) {
+                        BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
+                        String line = null;
+                        while ((line = input.readLine()) != null) {
+                            output.append(line);
+                            output.append(" ");
+                        }
+
+                        physicalDisk = getPhysicalDisk(address, pool);
+                        if (physicalDisk != null && physicalDisk.getSize() > 0) {
+                            LOGGER.debug("Found the volume using id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
+                            return true;
+                        }
+
+                        break;
+                    } else {
+                        LOGGER.warn("Failure discovering LUN via " + connectScript);
+                        BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
+                        String line = null;
+                        while ((line = error.readLine()) != null) {
+                            LOGGER.warn("error --> " + line);
+                        }
+                    }
+                } else {
+                    LOGGER.debug("Timeout waiting for " + connectScript + " to complete - try " + tries);
+                }
+            } catch (IOException | InterruptedException | IllegalThreadStateException e) {
+                LOGGER.warn("Problem performing scan on SCSI hosts - try " + tries, e);
+            } finally {
+                if (p != null && p.isAlive()) {
+                    p.destroyForcibly();
+                }
+            }
+
+            long elapsed = System.currentTimeMillis() - start;
+            waitTimeInMillis = waitTimeInMillis - elapsed;
+
+            try {
+                Thread.sleep(timeBetweenTries);
+            } catch (Exception ex) {
+                // don't do anything
+            }
+        }
+
+        LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
+        return false;
+    }
+
+    void runConnectScript(String lun, AddressInfo address) {
+        try {
+            ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
+            Process p = builder.start();
+            int rc = p.waitFor();
+            StringBuffer output = new StringBuffer();
+            if (rc == 0) {
+                BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
+                String line = null;
+                while ((line = input.readLine()) != null) {
+                    output.append(line);
+                    output.append(" ");
+                }
+            } else {
+                LOGGER.warn("Failure discovering LUN via " + connectScript);
+                BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
+                String line = null;
+                while ((line = error.readLine()) != null) {
+                    LOGGER.warn("error --> " + line);
+                }
+            }
+        } catch (IOException | InterruptedException e) {
+            throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e);
+        }
+    }
+
+    void sleep(long sleepTimeMs) {
+        try {
+            Thread.sleep(sleepTimeMs);
+        } catch (Exception ex) {
+            // don't do anything
+        }
+    }
+
+    long getPhysicalDiskSize(String diskPath) {
+        if (StringUtils.isEmpty(diskPath)) {
+            return 0;
+        }
+
+        Script diskCmd = new Script("blockdev", LOGGER);
+        diskCmd.add("--getsize64", diskPath);
+
+        OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
+        String result = diskCmd.execute(parser);
+
+        if (result != null) {
+            LOGGER.debug("Unable to get the disk size at path: " + diskPath);
+            return 0;
+        }
+
+        Long size = Long.parseLong(parser.getLine());
+
+        if (size <= 0) {
+            // it's possible the path isn't visible on the host yet, so rerun the command once more
+            parser = new OutputInterpreter.OneLineParser();
+            result = diskCmd.execute(parser);
+
+            if (result != null) {
+                LOGGER.debug("Unable to get the disk size at path: " + diskPath);
+                return 0;
+            }
+
+            size = Long.parseLong(parser.getLine());
+        }
+
+        return size;
+    }
+
+    public void resize(String path, String vmName, long newSize) {
+        if (LOGGER.isDebugEnabled()) LOGGER.debug("Executing resize of " + path + " to " + newSize + " bytes for VM " + vmName);
+
+        // extract wwid
+        AddressInfo address = parseAndValidatePath(path);
+        if (address == null || address.getAddress() == null) {
+            LOGGER.error("Unable to resize volume, address value is not valid");
+            throw new CloudRuntimeException("Unable to resize volume, address value is not valid");
+        }
+
+        if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("Running %s %s %s %s", resizeScript, address.getAddress(), vmName, newSize));
+
+        // call resizeVolume.sh
+        ScriptResult result = runScript(resizeScript, 60000L, address.getAddress(), vmName, "" + newSize);
+
+        if (result.getExitCode() != 0) {
+            throw new CloudRuntimeException("Failed to resize volume at address " + address.getAddress() + " to " + newSize + " bytes for VM " + vmName + ": " + result.getResult());
+        }
+
+        LOGGER.info("Resize of volume at address " + address.getAddress() + " completed successfully: " + result.getResult());
+    }
+
+    static void cleanupStaleMaps() {
+        synchronized (CLEANUP_LOCK) {
+            long start = System.currentTimeMillis();
+            ScriptResult result = runScript(cleanupScript, cleanupTimeoutSecs * 1000);
+            LOGGER.debug("Multipath Cleanup Job elapsed time (ms): " + (System.currentTimeMillis() - start) + "; result: " + result.getExitCode());
+        }
+    }
+
+    public static final class AddressInfo {
+        String type;
+        String address;
+        String connectionId;
+        String path;
+
+        public AddressInfo(String type, String address, String connectionId, String path) {
+            this.type = type;
+            this.address = address;
+            this.connectionId = connectionId;
+            this.path = path;
+        }
+
+        public String getType() {
+            return type;
+        }
+
+        public String getAddress() {
+            return address;
+        }
+
+        public String getConnectionId() {
+            return connectionId;
+        }
+
+        public String getPath() {
+            return path;
+        }
+
+        @Override
+        public String toString() {
+            return String.format("type=%s; address=%s; connid=%s", getType(), getAddress(), getConnectionId());
+        }
+    }
+
+    public static class Property<T> {
+        private String name;
+        private T defaultValue;
+
+        Property(String name, T value) {
+            this.name = name;
+            this.defaultValue = value;
+        }
+
+        public String getName() {
+            return this.name;
+        }
+
+        public T getDefaultValue() {
+            return this.defaultValue;
+        }
+
+        public T getFinalValue() {
+            File agentPropertiesFile = PropertiesUtil.findConfigFile("agent.properties");
+            if (agentPropertiesFile == null) {
+                LOGGER.debug(String.format("File [%s] was not found, we will use default defined values. Property [%s]: [%s].", "agent.properties", name, defaultValue));
+                return defaultValue;
+            } else {
+                try {
+                    String configValue = PropertiesUtil.loadFromFile(agentPropertiesFile).getProperty(name);
+                    if (StringUtils.isBlank(configValue)) {
+                        LOGGER.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue));
+                        return defaultValue;
+                    } else {
+                        // parse the configured value; Integer.getInteger/Long.getLong would read a
+                        // JVM system property instead of the agent.properties value
+                        if (defaultValue instanceof Integer) {
+                            return (T) Integer.valueOf(configValue);
+                        } else if (defaultValue instanceof Long) {
+                            return (T) Long.valueOf(configValue);
+                        } else if (defaultValue instanceof String) {
+                            return (T) configValue;
+                        } else if (defaultValue instanceof Boolean) {
+                            return (T) Boolean.valueOf(configValue);
+                        } else {
+                            return null;
+                        }
+                    }
+                } catch (IOException | NumberFormatException e) {
+                    LOGGER.debug(String.format("Failed to get property [%s]. Using default value [%s].", name, defaultValue), e);
+                    return defaultValue;
+                }
+            }
+        }
+    }
+
+    public static class ScriptResult {
+        private int exitCode = -1;
+        private String result = null;
+        public int getExitCode() {
+            return exitCode;
+        }
+        public void setExitCode(int exitCode) {
+            this.exitCode = exitCode;
+        }
+        public String getResult() {
+            return result;
+        }
+        public void setResult(String result) {
+            this.result = result;
+        }
+    }
+
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIPool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIPool.java
new file mode 100644
index 00000000000..bc2f072f719
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIPool.java
@@ -0,0 +1,241 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package com.cloud.hypervisor.kvm.storage; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.joda.time.Duration; + +import com.cloud.agent.api.to.HostTO; +import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool; +import com.cloud.storage.Storage; +import com.cloud.storage.Storage.ProvisioningType; + +public class MultipathSCSIPool implements KVMStoragePool { + private String uuid; + private String sourceHost; + private int sourcePort; + private String sourceDir; + private Storage.StoragePoolType storagePoolType; + private StorageAdaptor storageAdaptor; + private long capacity; + private long used; + private long available; + private Map details; + + public MultipathSCSIPool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, Map poolDetails, StorageAdaptor adaptor) { + this.uuid = uuid; + sourceHost = host; + sourcePort = port; + sourceDir = path; + storagePoolType = poolType; + storageAdaptor = adaptor; + capacity = 0; + used = 0; + available = 0; + details = poolDetails; + } + + public MultipathSCSIPool(String uuid, StorageAdaptor adapter) { + this.uuid = uuid; + sourceHost = null; + sourcePort = -1; + sourceDir = null; + storagePoolType = Storage.StoragePoolType.FiberChannel; + details = new HashMap(); + this.storageAdaptor = adapter; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String arg0, ProvisioningType arg1, long arg2, byte[] arg3) { + return null; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String arg0, PhysicalDiskFormat arg1, ProvisioningType arg2, long arg3, + byte[] arg4) { + return null; + } + + @Override + public boolean connectPhysicalDisk(String volumeUuid, Map details) { + return storageAdaptor.connectPhysicalDisk(volumeUuid, this, details); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumeId) { + return storageAdaptor.getPhysicalDisk(volumeId, this); + } + + @Override + public boolean disconnectPhysicalDisk(String volumeUuid) { + return storageAdaptor.disconnectPhysicalDisk(volumeUuid, this); + } + + @Override + public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) { + return true; + } + + @Override + public List listPhysicalDisks() { + return null; + } + + @Override + public String getUuid() { + return uuid; + } + + public void setCapacity(long capacity) { + this.capacity = capacity; + } + + @Override + public long getCapacity() { + return this.capacity; + } + + public void setUsed(long used) { + this.used = used; + } + + @Override + public long getUsed() { + return this.used; + } + + public void setAvailable(long available) { + this.available = available; + } + + @Override + public long getAvailable() { + return this.available; + } + + @Override + public boolean refresh() { + return false; + } + + @Override + public boolean isExternalSnapshot() { + return true; + } + + @Override + public String getLocalPath() { + return null; + } + + @Override + public String getSourceHost() { + return this.sourceHost; + } + + @Override + public String getSourceDir() { + return this.sourceDir; + } + + @Override + public int getSourcePort() { + return this.sourcePort; + } + + @Override + public String getAuthUserName() { + return null; + } + + @Override + public String getAuthSecret() { + return null; + } + + @Override + public Storage.StoragePoolType getType() { + return storagePoolType; + } + + 
@Override
+    public boolean delete() {
+        return false;
+    }
+
+    @Override
+    public QemuImg.PhysicalDiskFormat getDefaultFormat() {
+        return QemuImg.PhysicalDiskFormat.RAW;
+    }
+
+    @Override
+    public boolean createFolder(String path) {
+        return false;
+    }
+
+    @Override
+    public boolean supportsConfigDriveIso() {
+        return false;
+    }
+
+    @Override
+    public Map<String, String> getDetails() {
+        return this.details;
+    }
+
+    @Override
+    public boolean isPoolSupportHA() {
+        return false;
+    }
+
+    @Override
+    public String getHearthBeatPath() {
+        return null;
+    }
+
+    @Override
+    public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp,
+            boolean hostValidation) {
+        return null;
+    }
+
+    @Override
+    public String getStorageNodeId() {
+        return null;
+    }
+
+    @Override
+    public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
+        return null;
+    }
+
+    @Override
+    public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout,
+            String volumeUUIDListString, String vmActivityCheckPath, long duration) {
+        return null;
+    }
+
+    public void resize(String path, String vmName, long newSize) {
+        ((MultipathSCSIAdapterBase) storageAdaptor).resize(path, vmName, newSize);
+    }
+}
diff --git a/plugins/pom.xml b/plugins/pom.xml
index 6c4d561f896..2edbbd5ee1d 100755
--- a/plugins/pom.xml
+++ b/plugins/pom.xml
@@ -133,6 +133,9 @@
         <module>storage/volume/scaleio</module>
         <module>storage/volume/linstor</module>
         <module>storage/volume/storpool</module>
+        <module>storage/volume/adaptive</module>
+        <module>storage/volume/flasharray</module>
+        <module>storage/volume/primera</module>
         <module>storage/object/minio</module>
         <module>storage/object/simulator</module>
diff --git a/plugins/storage/volume/adaptive/README.md b/plugins/storage/volume/adaptive/README.md
new file mode 100644
index 00000000000..041f1f1a128
--- /dev/null
+++ b/plugins/storage/volume/adaptive/README.md
@@ -0,0 +1,58 @@
+# CloudStack Volume Provider Adaptive Plugin Base
+
+The Adaptive Plugin Base is an abstract volume storage provider that
+provides a generic implementation for managing volumes that are exposed
+to hosts through FiberChannel and similar methods but managed independently
+through a storage API or interface. The ProviderAdapter and associated
+classes provide a decoupled interface from the rest of
+CloudStack that covers the exact actions needed
+to interface with a storage provider. Each storage provider can extend
+and implement the ProviderAdapter without needing to understand the internal
+logic of volume management, database structure, etc.
+
+## Implement the Provider Interface
+To implement a provider, create another module -- or a standalone project --
+and implement the following interfaces from the **org.apache.cloudstack.storage.datastore.adapter** package:
+
+1. **ProviderAdapter** - this is the primary interface used to communicate with the storage provider when volume management actions are required.
+2. **ProviderAdapterFactory** - the implementation of this class creates the correct ProviderAdapter when needed.
+
+Follow the Javadoc on each class for further instructions on implementing each function.
+
+## Implement the Primary Datastore Provider Plugin
+Once the provider interface is implemented, you will need to extend the **org.apache.cloudstack.storage.datastore.provider.AdaptiveProviderDatastoreProviderImpl** class. When extending it, you simply need to implement a default
+constructor that creates an instance of the ProviderAdapterFactory implementation created in #2 above. Once created, you need to call the parent constructor and pass the factory object.
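+
+As a rough sketch (the names `MyProviderDatastoreProviderImpl` and `MyProviderAdapterFactory` are hypothetical placeholders for your own classes from #2 above, not part of the plugin base):
+```
+package org.apache.cloudstack.storage.datastore.provider;
+
+public class MyProviderDatastoreProviderImpl extends AdaptiveProviderDatastoreProviderImpl {
+    // the default constructor passes the provider's factory to the parent constructor
+    public MyProviderDatastoreProviderImpl() {
+        super(new MyProviderAdapterFactory());
+    }
+}
+```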
+
+## Provide the Configuration for the Provider Plugin
+Lastly, you need to include a module file and a Spring configuration for your Primary Datastore Provider Plugin class so CloudStack will load it during startup.
+
+### Module Properties
+This provides the hint to CloudStack to load this as a module during startup.
+```
+#resources/META-INF/cloudstack/storage-volume-<name>/module.properties
+name=storage-volume-<name>
+parent=storage
+```
+### Spring Bean Context Configuration
+This provides instructions for which provider implementation class to load when Spring bean initialization is running, for example:
+```
+#resources/META-INF/cloudstack/storage-volume-<name>/spring-storage-volume-<name>-context.xml
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+                           http://www.springframework.org/schema/beans/spring-beans.xsd">
+    <bean id="myProviderDataStoreProvider"
+          class="org.apache.cloudstack.storage.datastore.provider.MyProviderDatastoreProviderImpl" />
+</beans>
+```
+## Build and Deploy the Jar
+Once you build the new jar, start the CloudStack Management Server or, if it is a standalone jar, add it to the classpath before startup. You should then have a new storage provider of the designated name once CloudStack finishes loading
+all configured modules.
diff --git a/plugins/storage/volume/adaptive/pom.xml b/plugins/storage/volume/adaptive/pom.xml
new file mode 100644
index 00000000000..a8ef6337a0c
--- /dev/null
+++ b/plugins/storage/volume/adaptive/pom.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
+    <name>Apache CloudStack Plugin - Storage Volume Adaptive Base Provider</name>
+    <parent>
+        <groupId>org.apache.cloudstack</groupId>
+        <artifactId>cloudstack-plugins</artifactId>
+        <version>4.19.0.0-SNAPSHOT</version>
+        <relativePath>../../../pom.xml</relativePath>
+    </parent>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-engine-storage-volume</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-engine-storage-snapshot</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-plugin-storage-volume-default</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>integration-test</id>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java
new file mode 100644
index 00000000000..0cd44cd04c2
--- /dev/null
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java
@@ -0,0 +1,157 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.adapter;
+
+import java.util.Map;
+
+/**
+ * A simple DataStore adaptive interface. This interface allows the ManagedVolumeDataStoreDriverImpl
+ * to interact with the external provider without the provider needing to interface with any CloudStack
+ * objects, factories or database tables, simplifying the implementation and maintenance of the provider
+ * interface.
+ */
+public interface ProviderAdapter {
+    // some common keys across providers. Provider code determines what to do with them
+    public static final String API_USERNAME_KEY = "api_username";
+    public static final String API_PASSWORD_KEY = "api_password";
+    public static final String API_TOKEN_KEY = "api_token";
+    public static final String API_PRIVATE_KEY = "api_privatekey";
+    public static final String API_URL_KEY = "api_url";
+    public static final String API_SKIP_TLS_VALIDATION_KEY = "api_skiptlsvalidation";
+    // one of: basicauth (default), apitoken, privatekey
+    public static final String API_AUTHENTICATION_TYPE_KEY = "api_authn_type";
+
+    /**
+     * Refresh the connector with the provided details
+     * @param details
+     */
+    public void refresh(Map<String, String> details);
+
+    /**
+     * Validates that the adapter is currently connected/configured properly; otherwise throws
+     * a RuntimeException with information about what is misconfigured
+     */
+    public void validate();
+
+    /**
+     * Forcefully remove/disconnect
+     */
+    public void disconnect();
+
+    /**
+     * Create a new volume on the storage provider
+     * @param context
+     * @param volume
+     * @param diskOffering
+     * @param sizeInBytes
+     * @return
+     */
+    public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject volume, ProviderAdapterDiskOffering diskOffering, long sizeInBytes);
+
+    /**
+     * Attach the volume to the target object for the provided context. Returns the scope-specific connection value (for example, the LUN)
+     * @param context
+     * @param request
+     * @return
+     */
+    public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request);
+
+    /**
+     * Detach the host from the storage context
+     * @param context
+     * @param request
+     */
+    public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request);
+
+    /**
+     * Delete the provided volume/object
+     * @param context
+     * @param request
+     */
+    public void delete(ProviderAdapterContext context, ProviderAdapterDataObject request);
+
+    /**
+     * Copy a source object to a destination volume. The source object can be a Volume, Snapshot, or Template
+     */
+    public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetVolume);
+
+    /**
+     * Make a device-specific snapshot of the provided volume
+     */
+    public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetSnapshot);
+
+    /**
+     * Revert the snapshot to its base volume. Replaces the base volume with the snapshot point on the storage array
+     * @param context
+     * @param request
+     * @return
+     */
+    public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject request);
+
+    /**
+     * Resize a volume
+     * @param context
+     * @param request
+     * @param totalNewSizeInBytes
+     */
+    public void resize(ProviderAdapterContext context, ProviderAdapterDataObject request, long totalNewSizeInBytes);
+
+    /**
+     * Return the managed volume info from the storage system.
+     * @param context
+     * @param request
+     * @return ProviderVolume object or null if the object was not found but no errors were encountered.
+     */
+    public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject request);
+
+    /**
+     * Return the managed snapshot info from the storage system
+     * @param context
+     * @param request
+     * @return ProviderSnapshot object or null if the object was not found but no errors were encountered.
+     */
+    public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject request);
+
+    /**
+     * Given an array-specific address, find the matching volume information from the array
+     * @param context
+     * @param addressType
+     * @param address
+     * @return
+     */
+    public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, ProviderVolume.AddressType addressType, String address);
+
+    /**
+     * Returns stats about the managed storage where the volumes and snapshots are created/managed
+     * @return
+     */
+    public ProviderVolumeStorageStats getManagedStorageStats();
+
+    /**
+     * Returns stats about a specific volume
+     * @return
+     */
+    public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject request);
+
+    /**
+     * Returns true if the given hostname is accessible to the storage provider.
+     * @param context
+     * @param hostname
+     * @return
+     */
+    public boolean canAccessHost(ProviderAdapterContext context, String hostname);
+}
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterConstants.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterConstants.java
new file mode 100644
index 00000000000..e5e9f77d15b
--- /dev/null
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterConstants.java
@@ -0,0 +1,22 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.adapter;
+
+public class ProviderAdapterConstants {
+    public static final String EXTERNAL_UUID = "external_uuid";
+    public static final String EXTERNAL_NAME = "external_name";
+}
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterContext.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterContext.java
new file mode 100644
index 00000000000..c726fd6ca63
--- /dev/null
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterContext.java
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter; + +public class ProviderAdapterContext { + private String domainUuid; + private String domainName; + private Long domainId; + private String zoneUuid; + private String zoneName; + private Long zoneId; + private String accountUuid; + private String accountName; + private Long accountId; + public String getDomainUuid() { + return domainUuid; + } + public void setDomainUuid(String domainUuid) { + this.domainUuid = domainUuid; + } + public String getDomainName() { + return domainName; + } + public void setDomainName(String domainName) { + this.domainName = domainName; + } + public Long getDomainId() { + return domainId; + } + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + public String getZoneUuid() { + return zoneUuid; + } + public void setZoneUuid(String zoneUuid) { + this.zoneUuid = zoneUuid; + } + public String getZoneName() { + return zoneName; + } + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + public Long getZoneId() { + return zoneId; + } + public void setZoneId(Long zoneId) { + this.zoneId = zoneId; + } + public String getAccountUuid() { + return accountUuid; + } + public void setAccountUuid(String accountUuid) { + this.accountUuid = accountUuid; + } + public String getAccountName() { + return accountName; + } + public void setAccountName(String accountName) { + this.accountName = accountName; + } + public Long getAccountId() { + return accountId; + } + public void setAccountId(Long accountId) { + this.accountId = accountId; + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDataObject.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDataObject.java new file mode 100644 index 00000000000..16e0170cc60 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDataObject.java @@ -0,0 +1,159 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter;
+
+/**
+ * Represents a translation object for transmitting meta-data about a volume,
+ * snapshot or template between CloudStack and the storage provider
+ */
+public class ProviderAdapterDataObject {
+    public enum Type {
+        VOLUME,
+        SNAPSHOT,
+        TEMPLATE,
+        ARCHIVE
+    }
+    /**
+     * The CloudStack UUID of the object
+     */
+    private String uuid;
+    /**
+     * The CloudStack name of the object (generated or user provided)
+     */
+    private String name;
+    /**
+     * The type of the object
+     */
+    private Type type;
+    /**
+     * The internal local ID of the object (not globally unique)
+     */
+    private Long id;
+    /**
+     * The external name assigned on the storage array. It may be dynamically
+     * generated or derived from CloudStack data
+     */
+    private String externalName;
+
+    /**
+     * The external UUID of the object on the storage array. This may be different
+     * or the same as the CloudStack UUID depending on implementation.
+     */
+    private String externalUuid;
+
+    /**
+     * The internal (non-global) ID of the datastore this object is defined in
+     */
+    private Long dataStoreId;
+
+    /**
+     * The global ID of the datastore this object is defined in
+     */
+    private String dataStoreUuid;
+
+    /**
+     * The name of the data store this object is defined in
+     */
+    private String dataStoreName;
+
+    /**
+     * Represents the device connection id, typically a LUN, used to find the volume in conjunction with Address and AddressType.
+     */
+    private String externalConnectionId;
+
+    public String getUuid() {
+        return uuid;
+    }
+
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public Type getType() {
+        return type;
+    }
+
+    public void setType(Type type) {
+        this.type = type;
+    }
+
+    public String getExternalName() {
+        return externalName;
+    }
+
+    public void setExternalName(String externalName) {
+        this.externalName = externalName;
+    }
+
+    public String getExternalUuid() {
+        return externalUuid;
+    }
+
+    public void setExternalUuid(String externalUuid) {
+        this.externalUuid = externalUuid;
+    }
+
+    public Long getId() {
+        return id;
+    }
+
+    public void setId(Long id) {
+        this.id = id;
+    }
+
+    public Long getDataStoreId() {
+        return dataStoreId;
+    }
+
+    public void setDataStoreId(Long dataStoreId) {
+        this.dataStoreId = dataStoreId;
+    }
+
+    public String getDataStoreUuid() {
+        return dataStoreUuid;
+    }
+
+    public void setDataStoreUuid(String dataStoreUuid) {
+        this.dataStoreUuid = dataStoreUuid;
+    }
+
+    public String getDataStoreName() {
+        return dataStoreName;
+    }
+
+    public void setDataStoreName(String dataStoreName) {
+        this.dataStoreName = dataStoreName;
+    }
+
+    public String getExternalConnectionId() {
+        return externalConnectionId;
+    }
+
+    public void setExternalConnectionId(String externalConnectionId) {
+        this.externalConnectionId = externalConnectionId;
+    }
+
+}
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDiskOffering.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDiskOffering.java
new file mode 100644
index 00000000000..1db5efbb8ec
--- /dev/null
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDiskOffering.java
@@ -0,0 +1,194 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license
agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter; + +import java.util.Date; +import org.apache.commons.lang.NotImplementedException; +import com.cloud.offering.DiskOffering; + +/** + * Wrapper Disk Offering that masks the cloudstack-dependent classes from the storage provider code + */ +public class ProviderAdapterDiskOffering { + private ProvisioningType type; + private DiskCacheMode diskCacheMode; + private DiskOffering hiddenDiskOffering; + private State state; + public ProviderAdapterDiskOffering(DiskOffering hiddenDiskOffering) { + this.hiddenDiskOffering = hiddenDiskOffering; + if (hiddenDiskOffering.getProvisioningType() != null) { + this.type = ProvisioningType.getProvisioningType(hiddenDiskOffering.getProvisioningType().toString()); + } + if (hiddenDiskOffering.getCacheMode() != null) { + this.diskCacheMode = DiskCacheMode.getDiskCasehMode(hiddenDiskOffering.getCacheMode().toString()); + } + if (hiddenDiskOffering.getState() != null) { + this.state = State.valueOf(hiddenDiskOffering.getState().toString()); + } + } + public Long getBytesReadRate() { + return hiddenDiskOffering.getBytesReadRate(); + } + public Long getBytesReadRateMax() { + return hiddenDiskOffering.getBytesReadRateMax(); + } + public Long getBytesReadRateMaxLength() { + return hiddenDiskOffering.getBytesReadRateMaxLength(); + } + public Long getBytesWriteRate() { + return hiddenDiskOffering.getBytesWriteRate(); + } + public Long getBytesWriteRateMax() { + return hiddenDiskOffering.getBytesWriteRateMax(); + } + public Long getBytesWriteRateMaxLength() { + return hiddenDiskOffering.getBytesWriteRateMaxLength(); + } + public DiskCacheMode getCacheMode() { + return diskCacheMode; + } + public Date getCreated() { + return hiddenDiskOffering.getCreated(); + } + public long getDiskSize() { + return hiddenDiskOffering.getDiskSize(); + } + public boolean getDiskSizeStrictness() { + return hiddenDiskOffering.getDiskSizeStrictness(); + } + public String getDisplayText() { + return hiddenDiskOffering.getDisplayText(); + } + public boolean getEncrypt() { + return hiddenDiskOffering.getEncrypt(); + } + public Integer getHypervisorSnapshotReserve() { + return hiddenDiskOffering.getHypervisorSnapshotReserve(); + } + public long getId() { + return hiddenDiskOffering.getId(); + } + public Long getIopsReadRate() { + return hiddenDiskOffering.getIopsReadRate(); + } + public Long getIopsReadRateMax() { + return hiddenDiskOffering.getIopsReadRateMax(); + } + public Long getIopsReadRateMaxLength() { + return hiddenDiskOffering.getIopsReadRateMaxLength(); + } + public Long getIopsWriteRate() { + return hiddenDiskOffering.getIopsWriteRate(); + } + public Long getIopsWriteRateMax() { + return hiddenDiskOffering.getIopsWriteRateMax(); + } + public Long getIopsWriteRateMaxLength() { + return 
hiddenDiskOffering.getIopsWriteRateMaxLength(); + } + public Long getMaxIops() { + return hiddenDiskOffering.getMaxIops(); + } + public Long getMinIops() { + return hiddenDiskOffering.getMinIops(); + } + public String getName() { + return hiddenDiskOffering.getName(); + } + public State getState() { + return state; + } + public String getTags() { + return hiddenDiskOffering.getTags(); + } + public String[] getTagsArray() { + return hiddenDiskOffering.getTagsArray(); + } + public String getUniqueName() { + return hiddenDiskOffering.getUniqueName(); + } + public String getUuid() { + return hiddenDiskOffering.getUuid(); + } + public ProvisioningType getType() { + return type; + } + public void setType(ProvisioningType type) { + this.type = type; + } + + public static enum ProvisioningType { + THIN("thin"), + SPARSE("sparse"), + FAT("fat"); + + private final String provisionType; + + private ProvisioningType(String provisionType){ + this.provisionType = provisionType; + } + + public String toString(){ + return this.provisionType; + } + + public static ProvisioningType getProvisioningType(String provisioningType){ + + if(provisioningType.equals(THIN.provisionType)){ + return ProvisioningType.THIN; + } else if(provisioningType.equals(SPARSE.provisionType)){ + return ProvisioningType.SPARSE; + } else if (provisioningType.equals(FAT.provisionType)){ + return ProvisioningType.FAT; + } else { + throw new NotImplementedException("Invalid provisioning type specified: " + provisioningType); + } + } + } + + + enum State { + Inactive, Active, + } + + enum DiskCacheMode { + NONE("none"), WRITEBACK("writeback"), WRITETHROUGH("writethrough"); + + private final String _diskCacheMode; + + DiskCacheMode(String cacheMode) { + _diskCacheMode = cacheMode; + } + + @Override + public String toString() { + return _diskCacheMode; + } + + public static DiskCacheMode getDiskCasehMode(String cacheMode) { + if (cacheMode.equals(NONE._diskCacheMode)) { + return NONE; + } else if (cacheMode.equals(WRITEBACK._diskCacheMode)) { + return WRITEBACK; + } else if (cacheMode.equals(WRITETHROUGH._diskCacheMode)) { + return WRITETHROUGH; + } else { + throw new NotImplementedException("Invalid cache mode specified: " + cacheMode); + } + } + }; +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java new file mode 100644 index 00000000000..13a843d4763 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter; + +import java.util.Map; + +public interface ProviderAdapterFactory { + public String getProviderName(); + public ProviderAdapter create(String url, Map details); +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderSnapshot.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderSnapshot.java new file mode 100644 index 00000000000..50262ae6f2b --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderSnapshot.java @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter; + +public interface ProviderSnapshot extends ProviderVolume { + /** + * Returns true if the provider supports directly attaching the snapshot. + * If false is returned, it indicates that cloudstack needs to perform + * a temporary volume copy prior to copying the snapshot to a new + * volume on another provider + * @return + */ + public Boolean canAttachDirectly(); +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolume.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolume.java new file mode 100644 index 00000000000..25577903e3d --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolume.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter; + +public interface ProviderVolume { + + public Boolean isDestroyed(); + public String getId(); + public void setId(String id); + public String getName(); + public void setName(String name); + public Integer getPriority(); + public void setPriority(Integer priority); + public String getState(); + public AddressType getAddressType(); + public void setAddressType(AddressType addressType); + public String getAddress(); + public Long getAllocatedSizeInBytes(); + public Long getUsedBytes(); + public String getExternalUuid(); + public String getExternalName(); + public String getExternalConnectionId(); + public enum AddressType { + FIBERWWN + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java new file mode 100644 index 00000000000..5a72871e9c0 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter;
+
+public class ProviderVolumeNamer {
+
+    private static final String SNAPSHOT_PREFIX = "snap";
+    private static final String VOLUME_PREFIX = "vol";
+    private static final String TEMPLATE_PREFIX = "tpl";
+    /** Optional environment prefix to allow sharing a storage setup, primarily in lab/testing environments */
+    private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier");
+
+    public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) {
+        ProviderAdapterDataObject.Type objType = obj.getType();
+        String prefix = null;
+        if (objType == ProviderAdapterDataObject.Type.SNAPSHOT) {
+            prefix = SNAPSHOT_PREFIX;
+        } else if (objType == ProviderAdapterDataObject.Type.VOLUME) {
+            prefix = VOLUME_PREFIX;
+        } else if (objType == ProviderAdapterDataObject.Type.TEMPLATE) {
+            prefix = TEMPLATE_PREFIX;
+        } else {
+            throw new RuntimeException("Unknown ManagedDataObject type provided: " + obj.getType());
+        }
+
+        if (ENV_PREFIX != null) {
+            prefix = ENV_PREFIX + "-" + prefix;
+        }
+
+        return prefix + "-" + obj.getDataStoreId() + "-" + context.getDomainId() + "-" + context.getAccountId() + "-" + obj.getId();
+    }
+
+    public static String generateObjectComment(ProviderAdapterContext context, ProviderAdapterDataObject obj) {
+        return "CSInfo [Account=" + context.getAccountName() +
+            "; AccountUUID=" + context.getAccountUuid() +
+            "; Domain=" + context.getDomainName() +
+            "; DomainUUID=" + context.getDomainUuid() +
+            "; ObjectEndUserName=" + obj.getName() +
+            "; ObjectUUID=" + obj.getUuid() + "]";
+    }
+
+}
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStats.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStats.java
new file mode 100644
index 00000000000..33638e1f9ea
--- /dev/null
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStats.java
@@ -0,0 +1,55 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.adapter;
+
+public class ProviderVolumeStats {
+    private Long allocatedInBytes;
+    private Long virtualUsedInBytes;
+    private Long actualUsedInBytes;
+    private Long iops;
+    private Long throughput;
+    public Long getAllocatedInBytes() {
+        return allocatedInBytes;
+    }
+    public void setAllocatedInBytes(Long allocatedInBytes) {
+        this.allocatedInBytes = allocatedInBytes;
+    }
+    public Long getVirtualUsedInBytes() {
+        return virtualUsedInBytes;
+    }
+    public void setVirtualUsedInBytes(Long virtualUsedInBytes) {
+        this.virtualUsedInBytes = virtualUsedInBytes;
+    }
+    public Long getActualUsedInBytes() {
+        return actualUsedInBytes;
+    }
+    public void setActualUsedInBytes(Long actualUsedInBytes) {
+        this.actualUsedInBytes = actualUsedInBytes;
+    }
+    public Long getIops() {
+        return iops;
+    }
+    public void setIops(Long iops) {
+        this.iops = iops;
+    }
+    public Long getThroughput() {
+        return throughput;
+    }
+    public void setThroughput(Long throughput) {
+        this.throughput = throughput;
+    }
+}
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStorageStats.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStorageStats.java
new file mode 100644
index 00000000000..0624ef2db12
--- /dev/null
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStorageStats.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.adapter;
+
+public class ProviderVolumeStorageStats {
+    /**
+     * Total capacity in bytes currently physically used on the storage system within the scope of the given API configuration
+     */
+    private Long capacityInBytes;
+    /**
+     * Virtual amount of bytes allocated for use. Typically what the users of the volume think they have before
+     * any compression, deduplication, or thin-provisioning semantics are accounted for.
+ */ + private Long virtualUsedInBytes; + /** + * Actual physical bytes used on the storage system within the scope of the given API configuration + */ + private Long actualUsedInBytes; + /** + * Current IOPS + */ + private Long iops; + /** + * Current raw throughput + */ + private Long throughput; + public Long getVirtualUsedInBytes() { + return virtualUsedInBytes; + } + public void setVirtualUsedInBytes(Long virtualUsedInBytes) { + this.virtualUsedInBytes = virtualUsedInBytes; + } + public Long getActualUsedInBytes() { + return actualUsedInBytes; + } + public void setActualUsedInBytes(Long actualUsedInBytes) { + this.actualUsedInBytes = actualUsedInBytes; + } + public Long getIops() { + return iops; + } + public void setIops(Long iops) { + this.iops = iops; + } + public Long getThroughput() { + return throughput; + } + public void setThroughput(Long throughput) { + this.throughput = throughput; + } + public Long getCapacityInBytes() { + return capacityInBytes; + } + public void setCapacityInBytes(Long capacityInBytes) { + this.capacityInBytes = capacityInBytes; + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java new file mode 100644 index 00000000000..d908d48c7da --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -0,0 +1,901 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
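These stats classes are plain holders the driver reads back; a minimal sketch of how a provider adapter might populate the pool-wide stats that the driver's getStorageStats() consumes later in this patch (all numbers invented):

    // Invented values; a real adapter would query its array's API here
    ProviderVolumeStorageStats stats = new ProviderVolumeStorageStats();
    stats.setCapacityInBytes(100L * 1024 * 1024 * 1024);    // physical capacity in scope
    stats.setVirtualUsedInBytes(250L * 1024 * 1024 * 1024); // user-visible bytes before dedup/compression
    stats.setActualUsedInBytes(80L * 1024 * 1024 * 1024);   // physical bytes actually consumed
    stats.setIops(1500L);
    stats.setThroughput(200L * 1024 * 1024);                // raw bytes per second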
+package org.apache.cloudstack.storage.datastore.driver; + +import java.util.Map; +import javax.inject.Inject; +import org.apache.log4j.Logger; + +import java.util.HashMap; +import java.util.List; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterConstants; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.storage.volume.VolumeObject; +import org.apache.cloudstack.storage.snapshot.SnapshotObject; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.DiskTO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; +import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.projects.dao.ProjectDao; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.ResizeVolumePayload; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage.ImageFormat; + +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeDetailVO; +import 
com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplatePoolDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.user.AccountManager;
+import com.cloud.user.AccountVO;
+import com.cloud.user.dao.AccountDao;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
+
+public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDriverImpl {
+
+    static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreDriverImpl.class);
+
+    private String providerName = null;
+
+    @Inject
+    AccountManager _accountMgr;
+    @Inject
+    DiskOfferingDao _diskOfferingDao;
+    @Inject
+    VolumeDao _volumeDao;
+    @Inject
+    PrimaryDataStoreDao _storagePoolDao;
+    @Inject
+    ProjectDao _projectDao;
+    @Inject
+    SnapshotDataStoreDao _snapshotDataStoreDao;
+    @Inject
+    SnapshotDetailsDao _snapshotDetailsDao;
+    @Inject
+    VolumeDetailsDao _volumeDetailsDao;
+    @Inject
+    VMTemplatePoolDao _vmTemplatePoolDao;
+    @Inject
+    AccountDao _accountDao;
+    @Inject
+    StoragePoolDetailsDao _storagePoolDetailsDao;
+    @Inject
+    SnapshotDao _snapshotDao;
+    @Inject
+    VMTemplateDao _vmTemplateDao;
+    @Inject
+    DataCenterDao _datacenterDao;
+    @Inject
+    DomainDao _domainDao;
+    @Inject
+    VolumeService _volumeService;
+
+    private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null;
+
+    public AdaptiveDataStoreDriverImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
+        this._adapterFactoryMap = factoryMap;
+    }
+
+    @Override
+    public DataTO getTO(DataObject data) {
+        return null;
+    }
+
+    @Override
+    public DataStoreTO getStoreTO(DataStore store) {
+        return null;
+    }
+
+    public ProviderAdapter getAPI(StoragePool pool, Map<String, String> details) {
+        return _adapterFactoryMap.getAPI(pool.getUuid(), pool.getStorageProviderName(), details);
+    }
+
+    @Override
+    public void createAsync(DataStore dataStore, DataObject dataObject,
+            AsyncCompletionCallback<CreateCmdResult> callback) {
+        CreateCmdResult result = null;
+        try {
+            s_logger.info("Volume creation starting for data store [" + dataStore.getName() +
+                    "] and data object [" + dataObject.getUuid() + "] of type [" + dataObject.getType() + "]");
+
+            // the volume quota size will be increased by the given HypervisorSnapshotReserve
+            Long volumeSizeBytes = dataObject.getSize();
+            // CloudStack sizes are in bytes; some providers (e.g. Primera) work in MiB
+            StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
+            Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
+
+            ProviderAdapter api = getAPI(storagePool, details);
+            ProviderAdapterContext context = newManagedVolumeContext(dataObject);
+            ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
+            ProviderAdapterDiskOffering inDiskOffering = null;
+            // only get the offering if it's a volume type; if it's a template type we skip this.
+            if (DataObjectType.VOLUME.equals(dataObject.getType())) {
+                // get the disk offering as the provider may need to see its details to
+                // provision the correct type of volume
+                VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
+                DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeVO.getDiskOfferingId());
+                if (diskOffering.isUseLocalStorage()) {
+                    throw new CloudRuntimeException(
+                            "Disk offering requires local storage but this storage provider does not support local storage. Please contact the cloud administrator to have the disk offering configuration updated to avoid this conflict.");
+                }
+                inDiskOffering = new ProviderAdapterDiskOffering(diskOffering);
+            }
+
+            // if it's a template and it already exists, just return the info -- it may mean a previous attempt to
+            // copy this template failed after volume creation and its state has not advanced yet.
+            ProviderVolume volume = null;
+            if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
+                volume = api.getVolume(context, dataIn);
+                if (volume != null) {
+                    s_logger.info("Template volume already exists [" + dataObject.getUuid() + "]");
+                }
+            }
+
+            // create the volume if it didn't already exist
+            if (volume == null) {
+                // clunky - if this fails AND this detail property is set, it means upstream may have already created it
+                // in VolumeService and DataMotionStrategy tries to do it again before copying...
+                try {
+                    volume = api.create(context, dataIn, inDiskOffering, volumeSizeBytes);
+                } catch (Exception e) {
+                    VolumeDetailVO csId = _volumeDetailsDao.findDetail(dataObject.getId(), "cloneOfTemplate");
+                    if (csId != null && csId.getId() > 0) {
+                        volume = api.getVolume(context, dataIn);
+                    } else {
+                        throw e;
+                    }
+                }
+                s_logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]");
+            }
+
+            // set these from the discovered or created volume before proceeding
+            dataIn.setExternalName(volume.getExternalName());
+            dataIn.setExternalUuid(volume.getExternalUuid());
+
+            // add the volume to the host set
+            String connectionId = api.attach(context, dataIn);
+
+            // update the cloudstack metadata about the volume
+            persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId);
+
+            result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
+            result.setSuccess(true);
+            s_logger.info("Volume creation complete for [" + dataObject.getUuid() + "]");
+        } catch (Throwable e) {
+            s_logger.error("Volume creation failed for dataObject [" + dataObject.getUuid() + "]: " + e.toString(), e);
+            result = new CreateCmdResult(null, new Answer(null));
+            result.setResult(e.toString());
+            result.setSuccess(false);
+            throw new CloudRuntimeException(e.getMessage());
+        } finally {
+            if (callback != null)
+                callback.complete(result);
+        }
+    }
+
+    @Override
+    public void deleteAsync(DataStore dataStore, DataObject dataObject,
+            AsyncCompletionCallback<CommandResult> callback) {
+        s_logger.debug("Delete volume started");
+        CommandResult result = new CommandResult();
+        try {
+            StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
+            Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
+            ProviderAdapter api = getAPI(storagePool, details);
+            ProviderAdapterContext context = newManagedVolumeContext(dataObject);
+            ProviderAdapterDataObject inData = newManagedDataObject(dataObject, storagePool);
+            // skip the adapter delete if neither external identifier is set; it probably means the volume
+            // create failed before either could be set
+            if (!(inData.getExternalName() == null && inData.getExternalUuid() == null)) {
+                api.delete(context, inData);
+            }
+            result.setResult("Successfully deleted volume");
+            result.setSuccess(true);
+        } catch (Throwable e) {
+            s_logger.error("Volume delete failed with exception", e);
+            result.setResult(e.toString());
+        } finally {
+            if (callback != null)
+                callback.complete(result);
+        }
+    }
+
+    @Override
+    public void copyAsync(DataObject srcdata, DataObject destdata,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        CopyCommandResult result = null;
+        try {
+            s_logger.info("Copying volume [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
+
+            if (!canCopy(srcdata, destdata)) {
+                throw new CloudRuntimeException(
+                        "The data store provider is unable to perform copy operations because the source or destination object is not the correct type of volume");
+            }
+
+            try {
+                StoragePoolVO storagePool = _storagePoolDao.findById(srcdata.getDataStore().getId());
+                Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
+                ProviderAdapter api = getAPI(storagePool, details);
+
+                s_logger.info("Copy volume [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
+
+                ProviderVolume outVolume;
+                ProviderAdapterContext context = newManagedVolumeContext(destdata);
+                ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool);
+                ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool);
+                outVolume = api.copy(context, sourceIn, destIn);
+
+                // populate this data - it may be needed later
+                destIn.setExternalName(outVolume.getExternalName());
+                destIn.setExternalConnectionId(outVolume.getExternalConnectionId());
+                destIn.setExternalUuid(outVolume.getExternalUuid());
+
+                // if we copied from one volume to another, the target volume's disk offering or user input may call for a larger size;
+                // we won't, however, shrink a volume if it's smaller.
+                if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) {
+                    s_logger.info("Resizing volume " + destdata.getUuid() + " to requested target volume size of " + destdata.getSize());
+                    api.resize(context, destIn, destdata.getSize());
+                }
+
+                String connectionId = api.attach(context, destIn);
+
+                String finalPath;
+                // format: type=fiberwwn; address=
; connid=
+                if (connectionId != null) {
+                    finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId);
+                } else {
+                    finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase());
+                }
+
+                persistVolumeData(storagePool, details, destdata, outVolume, connectionId);
+                s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
+
+                VolumeObjectTO voto = new VolumeObjectTO();
+                voto.setPath(finalPath);
+
+                result = new CopyCommandResult(finalPath, new CopyCmdAnswer(voto));
+                result.setSuccess(true);
+            } catch (Throwable e) {
+                s_logger.error("Volume copy failed with exception", e);
+                result = new CopyCommandResult(null, null);
+                result.setSuccess(false);
+                result.setResult(e.toString());
+            }
+        } finally {
+            if (callback != null)
+                callback.complete(result);
+        }
+    }
+
+    @Override
+    public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        copyAsync(srcData, destData, callback);
+    }
+
+    @Override
+    public boolean canCopy(DataObject srcData, DataObject destData) {
+        s_logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":" +
+                srcData.getDataStore().getId() + "] AND destData [" +
+                destData.getUuid() + ":" + destData.getType() + ":" + destData.getDataStore().getId() + "]");
+        try {
+            if (!isSameProvider(srcData)) {
+                s_logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!");
+                return false;
+            }
+
+            if (!isSameProvider(destData)) {
+                s_logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!");
+                return false;
+            }
+            s_logger.debug(
+                    "canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists");
+            StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId());
+            Map<String, String> details = _storagePoolDao.getDetails(srcData.getDataStore().getId());
+            ProviderAdapter api = getAPI(poolVO, details);
+
+            /**
+             * The storage provider generates its own names for snapshots which we store and
+             * retrieve when needed
+             */
+            ProviderAdapterContext context = newManagedVolumeContext(srcData);
+            ProviderAdapterDataObject srcDataObject = newManagedDataObject(srcData, poolVO);
+            if (srcData instanceof SnapshotObject) {
+                ProviderSnapshot snapshot = api.getSnapshot(context, srcDataObject);
+                return snapshot != null;
+            } else {
+                ProviderVolume vol = api.getVolume(context, srcDataObject);
+                return vol != null;
+            }
+        } catch (Throwable e) {
+            s_logger.warn("Problem checking if we canCopy", e);
+            return false;
+        }
+    }
+
+    @Override
+    public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
+        s_logger.debug("Resize volume started");
+        CreateCmdResult result = null;
+        try {
+            VolumeObject vol = (VolumeObject) data;
+            StoragePool pool = (StoragePool) data.getDataStore();
+
+            ResizeVolumePayload resizeParameter = (ResizeVolumePayload) vol.getpayload();
+
+            StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
+
+            if (!(poolVO.isManaged())) {
+                super.resize(data, callback);
+                return;
+            }
+
+            try {
+                Map<String, String> details = _storagePoolDao.getDetails(pool.getId());
+                ProviderAdapter api = getAPI(pool, details);
+
+                // shrink is not supported (truncation might be possible, but that needs separate API investigation)
+                if (vol.getSize() > resizeParameter.newSize) {
+                    throw new CloudRuntimeException("Storage provider does not support shrinking an existing volume");
+                }
+
+                ProviderAdapterContext context = newManagedVolumeContext(data);
+                ProviderAdapterDataObject dataIn = newManagedDataObject(data, poolVO);
+                if (s_logger.isDebugEnabled()) s_logger.debug("Calling provider API to resize volume " + data.getUuid() + " to " + resizeParameter.newSize);
+                api.resize(context, dataIn, resizeParameter.newSize);
+
+                if (vol.isAttachedVM()) {
+                    if (VirtualMachine.State.Running.equals(vol.getAttachedVM().getState())) {
+                        if (s_logger.isDebugEnabled()) s_logger.debug("Notify currently attached VM of volume resize for " + data.getUuid() + " to " + resizeParameter.newSize);
+                        _volumeService.resizeVolumeOnHypervisor(vol.getId(), resizeParameter.newSize, vol.getAttachedVM().getHostId(), vol.getAttachedVM().getInstanceName());
+                    }
+                }
+
+                result = new CreateCmdResult(data.getUuid(), new Answer(null));
+                result.setSuccess(true);
+            } catch (Throwable e) {
+                s_logger.error("Resize volume failed", e);
+                result = new CreateCmdResult(null, new Answer(null));
+                result.setResult(e.toString());
+                result.setSuccess(false);
+            }
+        } finally {
+            if (callback != null)
+                callback.complete(result);
+        }
+
+    }
+
+    @Override
+    public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo,
+            QualityOfServiceState qualityOfServiceState) {
+        s_logger.info("handleQualityOfServiceForVolumeMigration: " + volumeInfo.getUuid() + " " +
+                volumeInfo.getPath() + ": " + qualityOfServiceState.toString());
+    }
+
+    @Override
+    public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
+        VolumeInfo volume = (VolumeInfo) dataObject;
+        long volumeSize = volume.getSize();
+        Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
+
+        if (hypervisorSnapshotReserve != null) {
+            if (hypervisorSnapshotReserve < 25) {
+                hypervisorSnapshotReserve = 25;
+            }
+
+            volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f);
+        }
+
+        return volumeSize;
+    }
+
+    @Override
+    public ChapInfo getChapInfo(DataObject dataObject) {
+        return null;
+    }
+
+    @Override
+    public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {
+        CreateCmdResult result = null;
+        try {
+            s_logger.debug("taking volume snapshot");
+            SnapshotObjectTO snapshotTO = (SnapshotObjectTO) snapshot.getTO();
+
+            VolumeInfo baseVolume = snapshot.getBaseVolume();
+            DataStore ds = baseVolume.getDataStore();
+            StoragePoolVO storagePool = _storagePoolDao.findById(ds.getId());
+
+            Map<String, String> details = _storagePoolDao.getDetails(ds.getId());
+            ProviderAdapter api = getAPI(storagePool, details);
+
+            ProviderAdapterContext context = newManagedVolumeContext(snapshot);
+            ProviderAdapterDataObject inVolumeDO = newManagedDataObject(baseVolume, storagePool);
+            ProviderAdapterDataObject inSnapshotDO = newManagedDataObject(snapshot, storagePool);
+            ProviderSnapshot outSnapshot = api.snapshot(context, inVolumeDO, inSnapshotDO);
+
+            // add the snapshot to the host group (needed for copying to non-provider storage
+            // to create templates, etc)
+            String connectionId = null;
+            String finalAddress = outSnapshot.getAddress();
+            if (outSnapshot.canAttachDirectly()) {
+                connectionId = api.attach(context, inSnapshotDO);
+                if (connectionId != null) {
+                    finalAddress = finalAddress + "::" + connectionId;
+                }
+            }
+
+            
snapshotTO.setPath(finalAddress); + snapshotTO.setName(outSnapshot.getName()); + snapshotTO.setHypervisorType(HypervisorType.KVM); + + // unclear why this is needed vs snapshotTO.setPath, but without it the path on + // the target snapshot object isn't set + // so a volume created from it also is not set and can't be attached to a VM + SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshot.getId(), + DiskTO.PATH, finalAddress, true); + _snapshotDetailsDao.persist(snapshotDetail); + + // save the name (reuse on revert) + snapshotDetail = new SnapshotDetailsVO(snapshot.getId(), + ProviderAdapterConstants.EXTERNAL_NAME, outSnapshot.getExternalName(), true); + _snapshotDetailsDao.persist(snapshotDetail); + + // save the uuid (reuse on revert) + snapshotDetail = new SnapshotDetailsVO(snapshot.getId(), + ProviderAdapterConstants.EXTERNAL_UUID, outSnapshot.getExternalUuid(), true); + _snapshotDetailsDao.persist(snapshotDetail); + + result = new CreateCmdResult(finalAddress, new CreateObjectAnswer(snapshotTO)); + result.setResult("Snapshot completed with new WWN " + finalAddress); + result.setSuccess(true); + } catch (Throwable e) { + s_logger.debug("Failed to take snapshot: " + e.getMessage()); + result = new CreateCmdResult(null, null); + result.setResult(e.toString()); + } finally { + if (callback != null) + callback.complete(result); + } + } + + @Override + public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, + AsyncCompletionCallback callback) { + + CommandResult result = new CommandResult(); + ProviderAdapter api = null; + try { + DataStore ds = snapshotOnPrimaryStore.getDataStore(); + StoragePoolVO storagePool = _storagePoolDao.findById(ds.getId()); + Map details = _storagePoolDao.getDetails(ds.getId()); + api = getAPI(storagePool, details); + + String externalName = null; + String externalUuid = null; + List list = _snapshotDetailsDao.findDetails(snapshot.getId(), + ProviderAdapterConstants.EXTERNAL_NAME); + if (list != null && list.size() > 0) { + externalName = list.get(0).getValue(); + } + + list = _snapshotDetailsDao.findDetails(snapshot.getId(), ProviderAdapterConstants.EXTERNAL_UUID); + if (list != null && list.size() > 0) { + externalUuid = list.get(0).getValue(); + } + + ProviderAdapterContext context = newManagedVolumeContext(snapshot); + ProviderAdapterDataObject inSnapshotDO = newManagedDataObject(snapshot, storagePool); + inSnapshotDO.setExternalName(externalName); + inSnapshotDO.setExternalUuid(externalUuid); + + // perform promote (async, wait for job to finish) + api.revert(context, inSnapshotDO); + + // set command as success + result.setSuccess(true); + } catch (Throwable e) { + s_logger.warn("revertSnapshot failed", e); + result.setResult(e.toString()); + result.setSuccess(false); + } finally { + if (callback != null) + callback.complete(result); + } + } + + @Override + public long getUsedBytes(StoragePool storagePool) { + long usedSpaceBytes = 0; + // Volumes + List volumes = _volumeDao.findByPoolIdAndState(storagePool.getId(), Volume.State.Ready); + if (volumes != null) { + for (VolumeVO volume : volumes) { + usedSpaceBytes += volume.getSize(); + + long vmSnapshotChainSize = volume.getVmSnapshotChainSize() == null ? 
0
+                    : volume.getVmSnapshotChainSize();
+                usedSpaceBytes += vmSnapshotChainSize;
+            }
+        }
+
+        // Snapshots
+        List<SnapshotDataStoreVO> snapshots = _snapshotDataStoreDao.listByStoreIdAndState(storagePool.getId(),
+                ObjectInDataStoreStateMachine.State.Ready);
+        if (snapshots != null) {
+            for (SnapshotDataStoreVO snapshot : snapshots) {
+                usedSpaceBytes += snapshot.getSize();
+            }
+        }
+
+        // Templates
+        List<VMTemplateStoragePoolVO> templates = _vmTemplatePoolDao.listByPoolIdAndState(storagePool.getId(),
+                ObjectInDataStoreStateMachine.State.Ready);
+        if (templates != null) {
+            for (VMTemplateStoragePoolVO template : templates) {
+                usedSpaceBytes += template.getTemplateSize();
+            }
+        }
+
+        s_logger.debug("Used/Allocated storage space (in bytes): " + usedSpaceBytes);
+
+        return usedSpaceBytes;
+    }
+
+    @Override
+    public long getUsedIops(StoragePool storagePool) {
+        return super.getUsedIops(storagePool);
+    }
+
+    @Override
+    public Map<String, String> getCapabilities() {
+        Map<String, String> mapCapabilities = new HashMap<String, String>();
+
+        mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
+        mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
+        mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());
+        mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
+        // indicates the datastore can create temporary volumes for use when copying
+        // data from a snapshot
+        mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString());
+
+        return mapCapabilities;
+    }
+
+    @Override
+    public boolean canProvideStorageStats() {
+        return true;
+    }
+
+    @Override
+    public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
+        Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
+        String capacityBytesStr = details.get("capacityBytes");
+        Long capacityBytes = null;
+        if (capacityBytesStr == null) {
+            ProviderAdapter api = getAPI(storagePool, details);
+            ProviderVolumeStorageStats stats = api.getManagedStorageStats();
+            if (stats == null) {
+                return null;
+            }
+            capacityBytes = stats.getCapacityInBytes();
+        } else {
+            capacityBytes = Long.parseLong(capacityBytesStr);
+        }
+        Long usedBytes = this.getUsedBytes(storagePool);
+        return new Pair<Long, Long>(capacityBytes, usedBytes);
+    }
+
+    @Override
+    public boolean canProvideVolumeStats() {
+        return true;
+    }
+
+    public String getProviderName() {
+        return providerName;
+    }
+
+    public void setProviderName(String providerName) {
+        this.providerName = providerName;
+    }
+
+    @Override
+    public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumePath) {
+        Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
+        ProviderAdapter api = getAPI(storagePool, details);
+        ProviderVolume.AddressType addressType = null;
+        if (volumePath.indexOf(";") > 1) {
+            String[] fields = volumePath.split(";");
+            if (fields.length > 0) {
+                for (String field: fields) {
+                    if (field.trim().startsWith("address=")) {
+                        String[] toks = field.split("=");
+                        if (toks.length > 1) {
+                            volumePath = toks[1];
+                        }
+                    } else if (field.trim().startsWith("type=")) {
+                        String[] toks = field.split("=");
+                        if (toks.length > 1) {
+                            addressType = ProviderVolume.AddressType.valueOf(toks[1]);
+                        }
+                    }
+                }
+            }
+        } else {
+            addressType = ProviderVolume.AddressType.FIBERWWN;
+        }
+        // limited context since this is not at an account level
+        ProviderAdapterContext context = new ProviderAdapterContext();
+        context.setZoneId(storagePool.getDataCenterId());
+        ProviderVolume volume = api.getVolumeByAddress(context, addressType, volumePath);
+
+        if (volume == null) {
+            return null;
+        }
+
+        ProviderAdapterDataObject object = new ProviderAdapterDataObject();
+        object.setExternalUuid(volume.getExternalUuid());
+        object.setExternalName(volume.getExternalName());
+        object.setType(ProviderAdapterDataObject.Type.VOLUME);
+        ProviderVolumeStats stats = api.getVolumeStats(context, object);
+
+        Long provisionedSizeInBytes = stats.getActualUsedInBytes();
+        Long allocatedSizeInBytes = stats.getAllocatedInBytes();
+        if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) {
+            return null;
+        }
+        return new Pair<Long, Long>(provisionedSizeInBytes, allocatedSizeInBytes);
+    }
+
+    @Override
+    public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
+        Map<String, String> details = _storagePoolDao.getDetails(pool.getId());
+        ProviderAdapter api = getAPI(pool, details);
+
+        ProviderAdapterContext context = new ProviderAdapterContext();
+        context.setZoneId(host.getDataCenterId());
+        return api.canAccessHost(context, host.getName());
+    }
+
+    void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
+            DataObject dataObject, ProviderVolume volume, String connectionId) {
+        if (dataObject.getType() == DataObjectType.VOLUME) {
+            persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
+        } else if (dataObject.getType() == DataObjectType.TEMPLATE) {
+            persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
+        }
+    }
+
+    void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
+            ProviderVolume managedVolume, String connectionId) {
+        VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
+
+        // if it's null, check if the storage provider returned one that is already set
+        if (connectionId == null) {
+            connectionId = managedVolume.getExternalConnectionId();
+        }
+
+        String finalPath;
+        // format: type=fiberwwn; address=
; connid= + if (connectionId != null) { + finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId); + } else { + finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase()); + } + + volumeVO.setPath(finalPath); + volumeVO.setFormat(ImageFormat.RAW); + volumeVO.setPoolId(storagePool.getId()); + volumeVO.setExternalUuid(managedVolume.getExternalUuid()); + volumeVO.setDisplay(true); + volumeVO.setDisplayVolume(true); + _volumeDao.update(volumeVO.getId(), volumeVO); + + volumeVO = _volumeDao.findById(volumeVO.getId()); + + VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeVO.getId(), + DiskTO.PATH, finalPath, true); + _volumeDetailsDao.persist(volumeDetailVO); + + volumeDetailVO = new VolumeDetailVO(volumeVO.getId(), + ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true); + _volumeDetailsDao.persist(volumeDetailVO); + + volumeDetailVO = new VolumeDetailVO(volumeVO.getId(), + ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true); + _volumeDetailsDao.persist(volumeDetailVO); + } + + void persistTemplateData(StoragePoolVO storagePool, Map details, DataObject dataObject, + ProviderVolume volume, String connectionId) { + TemplateInfo templateInfo = (TemplateInfo) dataObject; + VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), + templateInfo.getId(), null); + // template pool ref doesn't have a details object so we'll save: + // 1. external name ==> installPath + // 2. address ==> local download path + if (connectionId == null) { + templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(), + volume.getAddress().toLowerCase())); + } else { + templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(), + volume.getAddress().toLowerCase(), connectionId)); + } + templatePoolRef.setLocalDownloadPath(volume.getExternalName()); + templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes()); + _vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef); + } + + ProviderAdapterContext newManagedVolumeContext(DataObject obj) { + ProviderAdapterContext ctx = new ProviderAdapterContext(); + if (obj instanceof VolumeInfo) { + VolumeVO vol = _volumeDao.findById(obj.getId()); + ctx.setAccountId(vol.getAccountId()); + ctx.setDomainId(vol.getDomainId()); + } else if (obj instanceof SnapshotInfo) { + SnapshotVO snap = _snapshotDao.findById(obj.getId()); + ctx.setAccountId(snap.getAccountId()); + ctx.setDomainId(snap.getDomainId()); + } else if (obj instanceof TemplateInfo) { + VMTemplateVO template = _vmTemplateDao.findById(obj.getId()); + ctx.setAccountId(template.getAccountId()); + // templates don't have a domain ID so always set to 0 + ctx.setDomainId(0L); + } + + if (ctx.getAccountId() != null) { + AccountVO acct = _accountDao.findById(ctx.getAccountId()); + if (acct != null) { + ctx.setAccountUuid(acct.getUuid()); + ctx.setAccountName(acct.getName()); + } + } + + if (ctx.getDomainId() != null) { + DomainVO domain = _domainDao.findById(ctx.getDomainId()); + if (domain != null) { + ctx.setDomainUuid(domain.getUuid()); + ctx.setDomainName(domain.getName()); + } + } + + return ctx; + } + + boolean isSameProvider(DataObject obj) { + StoragePoolVO storagePool = this._storagePoolDao.findById(obj.getDataStore().getId()); + if 
(storagePool != null && storagePool.getStorageProviderName().equals(this.getProviderName())) { + return true; + } else { + return false; + } + } + + ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool storagePool) { + ProviderAdapterDataObject dataIn = new ProviderAdapterDataObject(); + if (data instanceof VolumeInfo) { + List list = _volumeDetailsDao.findDetails(data.getId(), + ProviderAdapterConstants.EXTERNAL_NAME); + String externalName = null; + if (list != null && list.size() > 0) { + externalName = list.get(0).getValue(); + } + + list = _volumeDetailsDao.findDetails(data.getId(), ProviderAdapterConstants.EXTERNAL_UUID); + String externalUuid = null; + if (list != null && list.size() > 0) { + externalUuid = list.get(0).getValue(); + } + + dataIn.setName(((VolumeInfo) data).getName()); + dataIn.setExternalName(externalName); + dataIn.setExternalUuid(externalUuid); + } else if (data instanceof SnapshotInfo) { + List list = _snapshotDetailsDao.findDetails(data.getId(), + ProviderAdapterConstants.EXTERNAL_NAME); + String externalName = null; + if (list != null && list.size() > 0) { + externalName = list.get(0).getValue(); + } + + list = _snapshotDetailsDao.findDetails(data.getId(), ProviderAdapterConstants.EXTERNAL_UUID); + String externalUuid = null; + if (list != null && list.size() > 0) { + externalUuid = list.get(0).getValue(); + } + + dataIn = new ProviderAdapterDataObject(); + dataIn.setName(((SnapshotInfo) data).getName()); + dataIn.setExternalName(externalName); + dataIn.setExternalUuid(externalUuid); + } else if (data instanceof TemplateInfo) { + TemplateInfo ti = (TemplateInfo)data; + dataIn.setName(ti.getName()); + VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), ti.getId(), null); + dataIn.setExternalName(templatePoolRef.getLocalDownloadPath()); + } + dataIn.setId(data.getId()); + dataIn.setDataStoreId(data.getDataStore().getId()); + dataIn.setDataStoreUuid(data.getDataStore().getUuid()); + dataIn.setDataStoreName(data.getDataStore().getName()); + dataIn.setUuid(data.getUuid()); + dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString())); + return dataIn; + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java new file mode 100644 index 00000000000..56d9a25f34f --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -0,0 +1,407 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
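Before moving on to the lifecycle class, a worked example of the volume path convention this driver persists and later parses back in getVolumeStats() (the WWN and connection id below are invented):

    // A path as persisted by persistVolumeData()/copyAsync(), e.g.:
    //   "type=FIBERWWN; address=60002ac000000000000000010001234a; connid=7"
    String volumePath = "type=FIBERWWN; address=60002ac000000000000000010001234a; connid=7";
    ProviderVolume.AddressType addressType = null;
    String address = null;
    for (String field : volumePath.split(";")) {
        String[] toks = field.trim().split("=");
        if (field.trim().startsWith("address=") && toks.length > 1) {
            address = toks[1];                                      // the lower-cased WWN
        } else if (field.trim().startsWith("type=") && toks.length > 1) {
            addressType = ProviderVolume.AddressType.valueOf(toks[1]); // FIBERWWN
        }
        // the connid component is ignored for stats lookups
    }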
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.utils.crypt.DBEncryptionUtil;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.host.Host;
+
+/**
+ * Manages the lifecycle of a Managed Data Store in CloudStack
+ */
+public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+    @Inject
+    private PrimaryDataStoreDao _storagePoolDao;
+    private static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreLifeCycleImpl.class);
+
+    @Inject
+    PrimaryDataStoreHelper _dataStoreHelper;
+    @Inject
+    protected ResourceManager _resourceMgr;
+    @Inject
+    private StoragePoolAutomation _storagePoolAutomation;
+    @Inject
+    private PrimaryDataStoreDao _primaryDataStoreDao;
+    @Inject
+    private StorageManager _storageMgr;
+    @Inject
+    private ClusterDao _clusterDao;
+    AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap;
+
+    public AdaptiveDataStoreLifeCycleImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
+        _adapterFactoryMap = factoryMap;
+    }
+
+    /**
+     * Initialize the storage pool
+     * https://hostname:port?cpg=&snapcpg=&hostset=&disabletlsvalidation=true&
+     */
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        // https://hostname:443/cpgname/hostsetname. 
hostset should map to the cluster or zone (all nodes in the cluster or zone MUST be in the hostset and be configured outside cloudstack for now)
+        String url = (String) dsInfos.get("url");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        Long podId = (Long)dsInfos.get("podId");
+        Long clusterId = (Long)dsInfos.get("clusterId");
+        String dsName = (String) dsInfos.get("name");
+        String providerName = (String) dsInfos.get("providerName");
+        Long capacityBytes = (Long) dsInfos.get("capacityBytes");
+        Long capacityIops = (Long)dsInfos.get("capacityIops");
+        String tags = (String)dsInfos.get("tags");
+        @SuppressWarnings("unchecked")
+        Map<String, String> details = (Map<String, String>) dsInfos.get("details");
+
+        // validate inputs are valid/provided as required
+        if (zoneId == null) throw new CloudRuntimeException("Zone Id must be specified.");
+
+        URL uri = null;
+        try {
+            uri = new URL(url);
+        } catch (Exception ignored) {
+            throw new CloudRuntimeException(url + " is not a valid URL");
+        }
+
+        String username = null;
+        String password = null;
+        String token = null;
+        String userInfo = uri.getUserInfo();
+        if (userInfo == null || userInfo.split(":").length < 2) {
+            // check if it was passed in the details object
+            username = details.get(ProviderAdapter.API_USERNAME_KEY);
+            if (username != null) {
+                password = details.get(ProviderAdapter.API_PASSWORD_KEY);
+                userInfo = username + ":" + password;
+            } else {
+                token = details.get(ProviderAdapter.API_TOKEN_KEY);
+            }
+        } else {
+            try {
+                userInfo = java.net.URLDecoder.decode(userInfo, StandardCharsets.UTF_8.toString());
+            } catch (UnsupportedEncodingException e) {
+                throw new CloudRuntimeException("Unexpected error parsing the provided user info; check that it does not include any invalid characters");
+            }
+
+            username = userInfo.split(":")[0];
+            password = userInfo.split(":")[1];
+        }
+
+        s_logger.info("Registering block storage provider with user=" + username);
+
+
+        if (clusterId != null) {
+            Hypervisor.HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
+
+            if (!hypervisorType.equals(HypervisorType.KVM)) {
+                throw new CloudRuntimeException("Unsupported hypervisor type for provided cluster: " + hypervisorType.toString());
+            }
+
+            // Primary datastore is cluster-wide, check and set the podId and clusterId parameters
+            if (podId == null) {
+                throw new CloudRuntimeException("Pod Id must also be specified when the Cluster Id is specified for Cluster-wide primary storage.");
+            }
+
+            s_logger.info("Registering with clusterid=" + clusterId + " which is confirmed to be a KVM cluster");
+
+        } else if (podId != null) {
+            throw new CloudRuntimeException("Cluster Id must also be specified when the Pod Id is specified for Cluster-wide primary storage.");
+        }
+
+        // validate we don't have any duplication going on
+        List<StoragePoolVO> storagePoolVO = _primaryDataStoreDao.findPoolsByProvider(providerName);
+        if (CollectionUtils.isNotEmpty(storagePoolVO)) {
+            for (StoragePoolVO poolVO : storagePoolVO) {
+                Map<String, String> poolDetails = _primaryDataStoreDao.getDetails(poolVO.getId());
+                String otherPoolUrl = poolDetails.get(ProviderAdapter.API_URL_KEY);
+                if (dsName.equals(poolVO.getName())) {
+                    throw new InvalidParameterValueException("A pool with the name [" + dsName + "] already exists, choose another name");
+                }
+
+                if (uri.toString().equals(otherPoolUrl)) {
+                    throw new IllegalArgumentException("Provider URL [" + otherPoolUrl + "] is already in use by another storage pool named [" + poolVO.getName() + "], please validate you have the correct API and CPG");
+                }
+            }
+        }
+
+        s_logger.info("Validated no other 
pool exists with this name: " + dsName);
+
+        try {
+            PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
+            parameters.setHost(uri.getHost());
+            parameters.setPort(uri.getPort());
+            parameters.setPath(uri.getPath() + "?" + uri.getQuery());
+            parameters.setType(StoragePoolType.FiberChannel);
+            parameters.setZoneId(zoneId);
+            parameters.setPodId(podId);
+            parameters.setClusterId(clusterId);
+            parameters.setName(dsName);
+            parameters.setProviderName(providerName);
+            parameters.setManaged(true);
+            parameters.setCapacityBytes(capacityBytes);
+            parameters.setUsedBytes(0);
+            parameters.setCapacityIops(capacityIops);
+            parameters.setHypervisorType(HypervisorType.KVM);
+            parameters.setTags(tags);
+            parameters.setUserInfo(userInfo);
+            parameters.setUuid(UUID.randomUUID().toString());
+
+            details.put(ProviderAdapter.API_URL_KEY, uri.toString());
+            if (username != null) {
+                details.put(ProviderAdapter.API_USERNAME_KEY, username);
+            }
+
+            if (password != null) {
+                details.put(ProviderAdapter.API_PASSWORD_KEY, DBEncryptionUtil.encrypt(password));
+            }
+
+            if (token != null) {
+                details.put(ProviderAdapter.API_TOKEN_KEY, DBEncryptionUtil.encrypt(details.get(ProviderAdapter.API_TOKEN_KEY)));
+            }
+            // this appears to control placing the storage pool above network file system based storage pools in priority
+            details.put(Storage.Capability.HARDWARE_ACCELERATION.toString(), "true");
+            // this new capability indicates the storage pool allows volumes to migrate to/from other pools (i.e. to/from NFS pools)
+            details.put(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString(), "true");
+            parameters.setDetails(details);
+
+            // make sure the storage array is connectable and the pod and hostgroup objects exist
+            ProviderAdapter api = _adapterFactoryMap.getAPI(parameters.getUuid(), providerName, details);
+
+            // validate the provided details are correct/valid for the provider
+            api.validate();
+
+            // if we have user-provided capacity bytes, validate they do not exceed the managed storage capacity bytes
+            ProviderVolumeStorageStats stats = api.getManagedStorageStats();
+            if (capacityBytes != null && capacityBytes != 0) {
+                if (stats.getCapacityInBytes() > 0) {
+                    if (stats.getCapacityInBytes() < capacityBytes) {
+                        throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", storage capacity from storage provider: " + stats.getCapacityInBytes());
+                    }
+                }
+                parameters.setCapacityBytes(capacityBytes);
+            }
+            // if we have no user-provided capacity bytes, use the ones provided by storage
+            else {
+                if (stats.getCapacityInBytes() <= 0) {
+                    throw new InvalidParameterValueException("Capacity bytes not available from the storage provider, user-provided capacity bytes must be specified");
+                }
+                parameters.setCapacityBytes(stats.getCapacityInBytes());
+            }
+
+            s_logger.info("Persisting [" + dsName + "] storage pool metadata to database");
+            return _dataStoreHelper.createPrimaryDataStore(parameters);
+        } catch (Throwable e) {
+            s_logger.error("Problem persisting storage pool", e);
+            throw new CloudRuntimeException(e);
+        }
+    }
+
+    /**
+     * Get the type of Hypervisor from the cluster id
+     * @param clusterId
+     * @return
+     */
+    private Hypervisor.HypervisorType getHypervisorTypeForCluster(long clusterId) {
+        ClusterVO cluster = _clusterDao.findById(clusterId);
+        if (cluster == null) {
+            throw new CloudRuntimeException("Unable to locate the specified cluster: " + clusterId);
+        }
+
+        return cluster.getHypervisorType();
+    }
+
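+    // Illustrative example of the inputs initialize() expects (all values below are hypothetical):
+    //   url:     https://array.example.com:443?cpg=MyCPG&hostset=MyHostSet&disabletlsvalidation=true
+    //   details: ProviderAdapter.API_USERNAME_KEY / ProviderAdapter.API_PASSWORD_KEY for "basicauth",
+    //            or ProviderAdapter.API_TOKEN_KEY for "apitoken"
+    //   scope:   zoneId is required; podId and clusterId must be supplied together for
+    //            cluster-wide (KVM-only) primary storage
+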
+    /**
+     * Attach the pool to a cluster (all hosts in a single cluster)
+     */
+    @Override
+    public boolean attachCluster(DataStore store, ClusterScope scope) {
+        s_logger.info("Attaching storage pool [" + store.getName() + "] to cluster [" + scope.getScopeId() + "]");
+        _dataStoreHelper.attachCluster(store);
+
+        StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId());
+
+        PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
+        // Check if there is a host up in this cluster
+        List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
+        if (allHosts.isEmpty()) {
+            _primaryDataStoreDao.expunge(primarystore.getId());
+            throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
+        }
+
+        if (dataStoreVO.isManaged()) {
+            for (HostVO h : allHosts) {
+                s_logger.debug("adding host " + h.getName() + " to storage pool " + store.getName());
+            }
+        }
+
+        s_logger.debug("Adding the pool to each of the hosts");
+        List<HostVO> poolHosts = new ArrayList<HostVO>();
+        for (HostVO h : allHosts) {
+            try {
+                _storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId());
+                poolHosts.add(h);
+            } catch (Exception e) {
+                s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e);
+            }
+        }
+
+        if (poolHosts.isEmpty()) {
+            s_logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId());
+            _primaryDataStoreDao.expunge(primarystore.getId());
+            throw new CloudRuntimeException("Failed to access storage pool");
+        }
+
+        return true;
+    }
+
+    @Override
+    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
+        s_logger.info("Attaching storage pool [" + store.getName() + "] to host [" + scope.getScopeId() + "]");
+        _dataStoreHelper.attachHost(store, scope, existingInfo);
+        return true;
+    }
+
+    @Override
+    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
+        s_logger.info("Attaching storage pool [" + dataStore.getName() + "] to zone [" + scope.getScopeId() + "]");
+        List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
+        List<HostVO> poolHosts = new ArrayList<HostVO>();
+        for (HostVO host : hosts) {
+            try {
+                _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
+                poolHosts.add(host);
+            } catch (Exception e) {
+                s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
+            }
+        }
+        if (poolHosts.isEmpty()) {
+            s_logger.warn("No host can access storage pool " + dataStore + " in this zone.");
+            _primaryDataStoreDao.expunge(dataStore.getId());
+            throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts.");
+        }
+        _dataStoreHelper.attachZone(dataStore, hypervisorType);
+        return true;
+    }
+
+    /**
+     * Put the storage pool in maintenance mode
+     */
+    @Override
+    public boolean maintain(DataStore store) {
+        s_logger.info("Placing storage pool [" + store.getName() + "] in maintenance mode");
+        if (_storagePoolAutomation.maintain(store)) {
+            return _dataStoreHelper.maintain(store);
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Cancel maintenance mode
+     */
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        s_logger.info("Canceling storage pool maintenance for [" + store.getName() + "]");
+        if (_dataStoreHelper.cancelMaintain(store)) {
+            return _storagePoolAutomation.cancelMaintain(store);
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Delete the data store
+     */
+    @Override
+    public boolean deleteDataStore(DataStore store) {
+        s_logger.info("Delete datastore called for [" + store.getName() + "]");
+        return _dataStoreHelper.deletePrimaryDataStore(store);
+    }
+
+    /**
+     * Migrate objects in this store to another store
+     */
+    @Override
+    public boolean migrateToObjectStore(DataStore store) {
+        s_logger.info("Migrate datastore called for [" + store.getName() + "]. This is not currently implemented for this provider");
+        return false;
+    }
+
+    /**
+     * Update the storage pool configuration
+     */
+    @Override
+    public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
+        _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details);
+    }
+
+    /**
+     * Enable the storage pool (allows volumes from this pool)
+     */
+    @Override
+    public void enableStoragePool(DataStore store) {
+        s_logger.info("Enabling storage pool [" + store.getName() + "]");
+        _dataStoreHelper.enable(store);
+    }
+
+    /**
+     * Disable storage pool (stops new volume provisioning from pool)
+     */
+    @Override
+    public void disableStoragePool(DataStore store) {
+        s_logger.info("Disabling storage pool [" + store.getName() + "]");
+        _dataStoreHelper.disable(store);
+    }
+}
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java
new file mode 100644
index 00000000000..ee5caa7178e
--- /dev/null
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java
@@ -0,0 +1,134 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.provider;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.crypt.DBEncryptionUtil;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class AdaptivePrimaryDatastoreAdapterFactoryMap {
+    private final Logger logger = Logger.getLogger(AdaptivePrimaryDatastoreAdapterFactoryMap.class);
+    private Map<String, ProviderAdapterFactory> factoryMap = new HashMap<String, ProviderAdapterFactory>();
+    private Map<String, ProviderAdapter> apiMap = new HashMap<String, ProviderAdapter>();
+
+    public AdaptivePrimaryDatastoreAdapterFactoryMap() {
+
+    }
+
+    /**
+     * Given a storage pool, return the current client.
+     * Reconfigure if changes are discovered.
+     */
+    public final ProviderAdapter getAPI(String uuid, String providerName, Map<String, String> details) {
+        ProviderAdapter api = apiMap.get(uuid);
+        if (api == null) {
+            synchronized (this) {
+                api = apiMap.get(uuid);
+                if (api == null) {
+                    api = createNewAdapter(uuid, providerName, details);
+                    apiMap.put(uuid, api);
+                    logger.debug("Cached the new ProviderAdapter for storage pool " + uuid);
+                }
+            }
+        }
+        return api;
+    }
+
+    /**
+     * Update the API with the given UUID. Allows for URL changes and authentication updates.
+     * @param uuid
+     * @param providerName
+     * @param details
+     */
+    public final void updateAPI(String uuid, String providerName, Map<String, String> details) {
+        // attempt to create (which validates) the new info before updating the cache
+        ProviderAdapter adapter = createNewAdapter(uuid, providerName, details);
+
+        // if it's null it's likely because no action has occurred yet to trigger the API object to be loaded
+        if (adapter == null) {
+            throw new CloudRuntimeException("Adapter configuration failed for an unknown reason");
+        }
+
+        ProviderAdapter oldAdapter = apiMap.get(uuid);
+        apiMap.put(uuid, adapter);
+        try {
+            if (oldAdapter != null) oldAdapter.disconnect();
+        } catch (Throwable e) {
+            logger.debug("Failure closing the old ProviderAdapter during an update of the cached data after validation of the new adapter configuration, likely the configuration is no longer valid", e);
+        }
+    }
+
+    public void register(ProviderAdapterFactory factory) {
+        factoryMap.put(factory.getProviderName(), factory);
+    }
+
+    protected ProviderAdapter createNewAdapter(String uuid, String providerName, Map<String, String> details) {
+        String authnType = details.get(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY);
+        if (authnType == null) authnType = "basicauth";
+        String lookupKey = null;
+        if (authnType.equals("basicauth")) {
+            lookupKey = details.get(ProviderAdapter.API_USERNAME_KEY);
+            if (lookupKey == null) {
+                throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_USERNAME_KEY + "] is required when using authentication type [" + authnType + "]");
+            }
+        } else if (authnType.equals("apitoken")) {
+            lookupKey = details.get(ProviderAdapter.API_TOKEN_KEY);
+            if (lookupKey == null) {
+                throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_TOKEN_KEY + "] is required when using authentication type [" + authnType + "]");
+            }
+        } else {
+            throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_AUTHENTICATION_TYPE_KEY + "] not set to valid value");
+        }
+
+        String url = details.get(ProviderAdapter.API_URL_KEY);
+        if (url == null) {
+            throw new RuntimeException("URL required when configuring a Managed Block API storage provider");
+        }
+
+        logger.debug("Looking for Provider [" + providerName + "] at [" + url + "]");
+        ProviderAdapterFactory factory = factoryMap.get(providerName);
+        if (factory == null) {
+            throw new RuntimeException("Unable to find a storage provider API factory for provider: " + providerName);
+        }
+
+        // decrypt password or token before sending to provider
+        if (authnType.equals("basicauth")) {
+            try {
+                details.put(ProviderAdapter.API_PASSWORD_KEY, DBEncryptionUtil.decrypt(details.get(ProviderAdapter.API_PASSWORD_KEY)));
+            } catch (Exception e) {
+                logger.warn("Failed to decrypt managed block API property: [" + ProviderAdapter.API_PASSWORD_KEY + "], trying to use as-is");
+            }
+        } else if (authnType.equals("apitoken")) {
+            try {
+                details.put(ProviderAdapter.API_TOKEN_KEY, 
DBEncryptionUtil.decrypt(details.get(ProviderAdapter.API_TOKEN_KEY))); + } catch (Exception e) { + logger.warn("Failed to decrypt managed block API property: [" + ProviderAdapter.API_TOKEN_KEY + "], trying to use as-is"); + } + } + + ProviderAdapter api = factory.create(url, details); + api.validate(); + logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url); + return api; + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java new file mode 100644 index 00000000000..200844702b2 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.provider; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.utils.component.ComponentContext; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; +import org.apache.cloudstack.storage.datastore.driver.AdaptiveDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.lifecycle.AdaptiveDataStoreLifeCycleImpl; + +@Component +public abstract class AdaptivePrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider { + static final Logger s_logger = Logger.getLogger(AdaptivePrimaryDatastoreProviderImpl.class); + + AdaptiveDataStoreDriverImpl driver; + + HypervisorHostListener listener; + + AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap = new AdaptivePrimaryDatastoreAdapterFactoryMap(); + + DataStoreLifeCycle lifecycle; + + AdaptivePrimaryDatastoreProviderImpl(ProviderAdapterFactory f) { + s_logger.info("Creating " + f.getProviderName()); + factoryMap.register(f); + } + + @Override + public DataStoreLifeCycle getDataStoreLifeCycle() { + return this.lifecycle; + } + + @Override + public boolean configure(Map params) { + s_logger.info("Configuring " + getName()); + driver = new AdaptiveDataStoreDriverImpl(factoryMap); + driver.setProviderName(getName()); + lifecycle = ComponentContext.inject(new 
AdaptiveDataStoreLifeCycleImpl(factoryMap));
+        driver = ComponentContext.inject(driver);
+        listener = ComponentContext.inject(new AdaptivePrimaryHostListener(factoryMap));
+        return true;
+    }
+
+    @Override
+    public PrimaryDataStoreDriver getDataStoreDriver() {
+        return this.driver;
+    }
+
+    @Override
+    public HypervisorHostListener getHostListener() {
+        return this.listener;
+    }
+
+    @Override
+    public Set<DataStoreProviderType> getTypes() {
+        Set<DataStoreProviderType> types = new HashSet<>();
+        types.add(DataStoreProviderType.PRIMARY);
+        return types;
+    }
+
+}
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java
new file mode 100644
index 00000000000..68dd4a15c62
--- /dev/null
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
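For reviewers following the wiring in configure() above: a concrete provider only supplies its adapter factory and a matching name; the abstract base handles the driver, lifecycle, and host-listener setup. A minimal sketch of such a subclass (hypothetical class name, not part of this patch; it assumes the same package as the abstract base so the package-private constructor is visible, and uses the FlashArrayAdapterFactory introduced later in this patch):

    package org.apache.cloudstack.storage.datastore.provider;

    import org.apache.cloudstack.storage.datastore.adapter.flasharray.FlashArrayAdapterFactory;

    public class ExamplePrimaryDatastoreProviderImpl extends AdaptivePrimaryDatastoreProviderImpl {
        public ExamplePrimaryDatastoreProviderImpl() {
            // registers the provider-specific factory with the shared factory map
            super(new FlashArrayAdapterFactory());
        }

        @Override
        public String getName() {
            // must match the name the factory reports from getProviderName()
            return "Flash Array";
        }
    }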
+package org.apache.cloudstack.storage.datastore.provider;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.StorageConflictException;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+
+public class AdaptivePrimaryHostListener implements HypervisorHostListener {
+    static final Logger s_logger = Logger.getLogger(AdaptivePrimaryHostListener.class);
+
+    @Inject
+    StoragePoolHostDao storagePoolHostDao;
+
+    public AdaptivePrimaryHostListener(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
+
+    }
+
+    @Override
+    public boolean hostAboutToBeRemoved(long hostId) {
+        s_logger.debug("hostAboutToBeRemoved called");
+        return true;
+    }
+
+    @Override
+    public boolean hostAdded(long hostId) {
+        s_logger.debug("hostAdded called");
+        return true;
+    }
+
+    @Override
+    public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
+        s_logger.debug("hostConnect called for host id [" + hostId + "], pool id [" + poolId + "]");
+        StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
+        if (storagePoolHost == null) {
+            storagePoolHost = new StoragePoolHostVO(poolId, hostId, "");
+            storagePoolHostDao.persist(storagePoolHost);
+        }
+        return true;
+    }
+
+    @Override
+    public boolean hostDisconnected(long hostId, long poolId) {
+        s_logger.debug("hostDisconnected called for host id [" + hostId + "], pool id [" + poolId + "]");
+        StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
+
+        if (storagePoolHost != null) {
+            storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
+        }
+        return true;
+    }
+
+    @Override
+    public boolean hostEnabled(long hostId) {
+        s_logger.debug("hostEnabled called");
+        return true;
+    }
+
+    @Override
+    public boolean hostRemoved(long hostId, long clusterId) {
+        s_logger.debug("hostRemoved called");
+        return true;
+    }
+}
diff --git a/plugins/storage/volume/flasharray/pom.xml b/plugins/storage/volume/flasharray/pom.xml
new file mode 100644
index 00000000000..267595b58e9
--- /dev/null
+++ b/plugins/storage/volume/flasharray/pom.xml
@@ -0,0 +1,52 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied. See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>cloud-plugin-storage-volume-flasharray</artifactId>
+    <name>Apache CloudStack Plugin - Storage Volume - Pure Flash Array</name>
+    <parent>
+        <groupId>org.apache.cloudstack</groupId>
+        <artifactId>cloudstack-plugins</artifactId>
+        <version>4.19.0.0-SNAPSHOT</version>
+        <relativePath>../../../pom.xml</relativePath>
+    </parent>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java
new file mode 100644
index 00000000000..3082a19c732
--- /dev/null
+++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java
@@ -0,0 +1,1086 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; + +import org.apache.http.Header; +import org.apache.http.NameValuePair; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeNamer; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume.AddressType; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.entity.UrlEncodedFormEntity; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPatch; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.TrustAllStrategy; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.message.BasicNameValuePair; +import org.apache.http.ssl.SSLContextBuilder; +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; + +/** + * Array API + */ +public class FlashArrayAdapter implements ProviderAdapter { + private Logger logger = Logger.getLogger(FlashArrayAdapter.class); + + public static final String HOSTGROUP = "hostgroup"; + public static final String STORAGE_POD = "pod"; + public static final String KEY_TTL = "keyttl"; + public static final String CONNECT_TIMEOUT_MS = "connectTimeoutMs"; + public static final String POST_COPY_WAIT_MS = "postCopyWaitMs"; + public static final String API_LOGIN_VERSION = "apiLoginVersion"; + public static final String API_VERSION = "apiVersion"; + + private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14); + private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000; + private static final long 
POST_COPY_WAIT_MS_DEFAULT = 5000;
+    private static final String API_LOGIN_VERSION_DEFAULT = "1.19";
+    private static final String API_VERSION_DEFAULT = "2.23";
+
+    static final ObjectMapper mapper = new ObjectMapper();
+    public String pod = null;
+    public String hostgroup = null;
+    private String username;
+    private String password;
+    private String accessToken;
+    private String url;
+    private long keyExpiration = -1;
+    private long keyTtl = KEY_TTL_DEFAULT;
+    private long connTimeout = CONNECT_TIMEOUT_MS_DEFAULT;
+    private long postCopyWait = POST_COPY_WAIT_MS_DEFAULT;
+    private CloseableHttpClient _client = null;
+    private boolean skipTlsValidation;
+    private String apiLoginVersion = API_LOGIN_VERSION_DEFAULT;
+    private String apiVersion = API_VERSION_DEFAULT;
+
+    private Map<String, String> connectionDetails = null;
+
+    protected FlashArrayAdapter(String url, Map<String, String> details) {
+        this.url = url;
+        this.connectionDetails = details;
+        login();
+    }
+
+    @Override
+    public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, ProviderAdapterDiskOffering offering, long size) {
+        FlashArrayVolume request = new FlashArrayVolume();
+        request.setExternalName(
+                pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject));
+        request.setPodName(pod);
+        request.setAllocatedSizeBytes(roundUp512Boundary(size));
+        FlashArrayList<FlashArrayVolume> list = POST("/volumes?names=" + request.getExternalName() + "&overwrite=false",
+                request, new TypeReference<FlashArrayList<FlashArrayVolume>>() {
+                });
+
+        return (ProviderVolume) getFlashArrayItem(list);
+    }
+
+    /**
+     * Volumes must be added to a host group to be visible to the hosts.
+     * The host group should contain all the hosts that are members of the zone or
+     * cluster (depending on the CloudStack storage pool configuration).
+     */
+    @Override
+    public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
+        String volumeName = normalizeName(pod, dataObject.getExternalName());
+        try {
+            FlashArrayList<FlashArrayConnection> list = POST("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName, null, new TypeReference<FlashArrayList<FlashArrayConnection>>() { });
+
+            if (list == null || list.getItems() == null || list.getItems().size() == 0) {
+                throw new RuntimeException("Volume attach did not return lun information");
+            }
+
+            FlashArrayConnection connection = (FlashArrayConnection) this.getFlashArrayItem(list);
+            if (connection.getLun() == null) {
+                throw new RuntimeException("Volume attach missing lun field");
+            }
+
+            return "" + connection.getLun();
+
+        } catch (Throwable e) {
+            // the volume may already be attached; this happens in some scenarios where
+            // orchestration creates the volume before copying to it
+            if (e.toString().contains("Connection already exists")) {
+                FlashArrayList<FlashArrayConnection> list = GET("/connections?volume_names=" + volumeName,
+                        new TypeReference<FlashArrayList<FlashArrayConnection>>() {
+                        });
+                if (list != null && list.getItems() != null) {
+                    return "" + list.getItems().get(0).getLun();
+                } else {
+                    throw new RuntimeException("Volume lun is not found in existing connection");
+                }
+            } else {
+                throw e;
+            }
+        }
+    }
+
+    @Override
+    public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
+        String volumeName = normalizeName(pod, dataObject.getExternalName());
+        DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName);
+    }
+
+    @Override
+    public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
+        // first make sure we are disconnected
+        removeVlunsAll(context, pod, dataObject.getExternalName());
+        String fullName = normalizeName(pod, dataObject.getExternalName());
+
+        FlashArrayVolume volume = new FlashArrayVolume();
+        volume.setDestroyed(true);
+        try {
+            PATCH("/volumes?names=" + fullName, volume, new TypeReference<FlashArrayList<FlashArrayVolume>>() {
+            });
+        } catch (CloudRuntimeException e) {
+            if (e.toString().contains("Volume does not exist")) {
+                return;
+            } else {
+                throw e;
+            }
+        }
+    }
+
+    @Override
+    public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
+        String externalName = dataObject.getExternalName();
+        // if it's not set, look for the generated name for some edge cases
+        if (externalName == null) {
+            externalName = pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject);
+        }
+        FlashArrayVolume volume = null;
+        try {
+            volume = getVolume(externalName);
+            // if we didn't get an address back, it's likely an empty object
+            if (volume != null && volume.getAddress() == null) {
+                return null;
+            } else if (volume == null) {
+                return null;
+            }
+
+            populateConnectionId(volume);
+
+            return volume;
+        } catch (Exception e) {
+            // assume any exception is a not-found; Flash returns 400's for most errors
+            return null;
+        }
+    }
+
+    @Override
+    public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) {
+        if (address == null || addressType == null) {
+            throw new RuntimeException("Invalid search criteria provided for getVolumeByAddress");
+        }
+
+        // only support WWN type addresses at this time.
+        if (!ProviderVolume.AddressType.FIBERWWN.equals(addressType)) {
+            throw new RuntimeException(
+                    "Invalid volume address type [" + addressType + "] requested for volume search");
+        }
+
+        // convert WWN to a serial to search on; strip out the WWN type # + Flash OUI value
+        String serial = address.substring(FlashArrayVolume.PURE_OUI.length() + 1).toUpperCase();
+        String query = "serial='" + serial + "'";
+
+        FlashArrayVolume volume = null;
+        try {
+            FlashArrayList<FlashArrayVolume> list = GET("/volumes?filter=" + query,
+                    new TypeReference<FlashArrayList<FlashArrayVolume>>() {
+                    });
+
+            // if we didn't get an address back, it's likely an empty object
+            if (list == null || list.getItems() == null || list.getItems().size() == 0) {
+                return null;
+            }
+
+            volume = (FlashArrayVolume) this.getFlashArrayItem(list);
+            if (volume != null && volume.getAddress() == null) {
+                return null;
+            }
+
+            populateConnectionId(volume);
+
+            return volume;
+        } catch (Exception e) {
+            // assume any exception is a not-found.
Flash returns 400's for most errors + return null; + } + } + + private void populateConnectionId(FlashArrayVolume volume) { + // we need to see if there is a connection (lun) associated with this volume. + // note we assume 1 lun for the hostgroup associated with this object + FlashArrayList list = null; + try { + list = GET("/connections?volume_names=" + volume.getExternalName(), + new TypeReference>() { + }); + } catch (CloudRuntimeException e) { + // this means there is no attachment associated with this volume on the array + if (e.toString().contains("Bad Request")) { + return; + } + } + + if (list != null && list.getItems() != null) { + for (FlashArrayConnection conn: list.getItems()) { + if (conn.getHostGroup() != null && conn.getHostGroup().getName().equals(this.hostgroup)) { + volume.setExternalConnectionId(""+conn.getLun()); + break; + } + } + + } + } + + @Override + public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, long newSizeInBytes) { + // public void resizeVolume(String volumeNamespace, String volumeName, long + // newSizeInBytes) { + FlashArrayVolume volume = new FlashArrayVolume(); + volume.setAllocatedSizeBytes(roundUp512Boundary(newSizeInBytes)); + PATCH("/volumes?names=" + dataObject.getExternalName(), volume, null); + } + + /** + * Take a snapshot and return a Volume representing that snapshot + * + * @param volumeName + * @param snapshotName + * @return + */ + @Override + public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject targetDataObject) { + // public FlashArrayVolume snapshotVolume(String volumeNamespace, String + // volumeName, String snapshotName) { + FlashArrayList list = POST( + "/volume-snapshots?source_names=" + sourceDataObject.getExternalName(), null, + new TypeReference>() { + }); + + return (FlashArrayVolume) getFlashArrayItem(list); + } + + /** + * Replaces the base volume with the given snapshot. 
Note this can only be done + * when the snapshot and volume + * are + * + * @param name + * @return + */ + @Override + public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject snapshotDataObject) { + // public void promoteSnapshot(String namespace, String snapshotName) { + if (snapshotDataObject == null || snapshotDataObject.getExternalName() == null) { + throw new RuntimeException("Snapshot revert not possible as an external snapshot name was not provided"); + } + + FlashArrayVolume snapshot = this.getSnapshot(snapshotDataObject.getExternalName()); + if (snapshot.getSource() == null) { + throw new CloudRuntimeException("Snapshot source was not available from the storage array"); + } + + String origVolumeName = snapshot.getSource().getName(); + + // now "create" a new volume with the snapshot volume as its source (basically a + // Flash array copy) + // and overwrite to true (volume already exists, we are recreating it) + FlashArrayVolume input = new FlashArrayVolume(); + input.setExternalName(origVolumeName); + input.setAllocatedSizeBytes(roundUp512Boundary(snapshot.getAllocatedSizeInBytes())); + input.setSource(new FlashArrayVolumeSource(snapshot.getExternalName())); + POST("/volumes?names=" + origVolumeName + "&overwrite=true", input, null); + + return this.getVolume(origVolumeName); + } + + @Override + public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + FlashArrayList list = GET( + "/volume-snapshots?names=" + dataObject.getExternalName(), + new TypeReference>() { + }); + return (FlashArrayVolume) getFlashArrayItem(list); + } + + @Override + public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject destDataObject) { + // private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace, + // String destName) { + if (sourceDataObject == null || sourceDataObject.getExternalName() == null + ||sourceDataObject.getType() == null) { + throw new RuntimeException("Provided volume has no external source information"); + } + + if (destDataObject == null) { + throw new RuntimeException("Provided volume target information was not provided"); + } + + if (destDataObject.getExternalName() == null) { + // this means its a new volume? 
so the external name becomes the CloudStack-generated object name
+            destDataObject
+                    .setExternalName(ProviderVolumeNamer.generateObjectName(context, destDataObject));
+        }
+
+        FlashArrayVolume currentVol;
+        if (sourceDataObject.getType().equals(ProviderAdapterDataObject.Type.SNAPSHOT)) {
+            currentVol = getSnapshot(sourceDataObject.getExternalName());
+        } else {
+            currentVol = (FlashArrayVolume) this
+                    .getFlashArrayItem(GET("/volumes?names=" + sourceDataObject.getExternalName(),
+                            new TypeReference<FlashArrayList<FlashArrayVolume>>() {
+                            }));
+        }
+
+        if (currentVol == null) {
+            throw new RuntimeException("Unable to find current volume to copy from");
+        }
+
+        // now "create" a new volume with the source volume as its source (basically a
+        // Flash array copy) and overwrite set to true (the volume already exists, we are recreating it)
+        FlashArrayVolume payload = new FlashArrayVolume();
+        payload.setExternalName(normalizeName(pod, destDataObject.getExternalName()));
+        payload.setPodName(pod);
+        payload.setAllocatedSizeBytes(roundUp512Boundary(currentVol.getAllocatedSizeInBytes()));
+        payload.setSource(new FlashArrayVolumeSource(sourceDataObject.getExternalName()));
+        FlashArrayList<FlashArrayVolume> list = POST(
+                "/volumes?names=" + payload.getExternalName() + "&overwrite=true", payload,
+                new TypeReference<FlashArrayList<FlashArrayVolume>>() {
+                });
+        FlashArrayVolume outVolume = (FlashArrayVolume) getFlashArrayItem(list);
+        pause(postCopyWait);
+        return outVolume;
+    }
+
+    private void pause(long period) {
+        try {
+            Thread.sleep(period);
+        } catch (InterruptedException e) {
+            // restore the interrupt flag so callers can see the thread was interrupted
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    public boolean supportsSnapshotConnection() {
+        return false;
+    }
+
+    @Override
+    public void refresh(Map<String, String> details) {
+        this.connectionDetails = details;
+        this.refreshSession(true);
+    }
+
+    @Override
+    public void validate() {
+        login();
+        // check that the hostgroup and pod from the details really exist - we
+        // require a distinct configuration object/connection object for each type
+        if (this.getHostgroup(hostgroup) == null) {
+            throw new RuntimeException("Hostgroup [" + hostgroup + "] not found in FlashArray at [" + url
+                    + "], please validate configuration");
+        }
+
+        if (this.getVolumeNamespace(pod) == null) {
+            throw new RuntimeException(
+                    "Pod [" + pod + "] not found in FlashArray at [" + url + "], please validate configuration");
+        }
+    }
+
+    @Override
+    public void disconnect() {
+        return;
+    }
+
+    @Override
+    public ProviderVolumeStorageStats getManagedStorageStats() {
+        FlashArrayPod podInfo = getVolumeNamespace(this.pod);
+        // guard against a missing pod or one that reports no footprint yet
+        if (podInfo == null || podInfo.getFootprint() == 0) {
+            return null;
+        }
+        Long capacityBytes = podInfo.getQuotaLimit();
+        // the pod footprint is the actual used capacity
+        Long usedBytes = podInfo.getFootprint();
+        ProviderVolumeStorageStats stats = new ProviderVolumeStorageStats();
+        stats.setCapacityInBytes(capacityBytes);
+        stats.setActualUsedInBytes(usedBytes);
+        return stats;
+    }
+
+    @Override
+    public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
+        ProviderVolume vol = getVolume(dataObject.getExternalName());
+        Long usedBytes = vol.getUsedBytes();
+        Long allocatedSizeInBytes = vol.getAllocatedSizeInBytes();
+        if (usedBytes == null || allocatedSizeInBytes == null) {
+            return null;
+        }
+        ProviderVolumeStats stats = new ProviderVolumeStats();
+        stats.setAllocatedInBytes(allocatedSizeInBytes);
+        stats.setActualUsedInBytes(usedBytes);
+        return stats;
+    }
+
+    @Override
+    public boolean canAccessHost(ProviderAdapterContext context, String hostname) {
+        if (hostname == null) {
+            throw new RuntimeException("Unable to validate host access because a hostname was not provided");
+        }
+
+        List<String> members = getHostgroupMembers(hostgroup);
+
+        // check for fqdn and shortname combinations; this assumes there is at least a shortname match
+        // in both the storage array and the CloudStack hostname configuration
+        String shortname;
+        if (hostname.indexOf('.') > 0) {
+            shortname = hostname.substring(0, (hostname.indexOf('.')));
+        } else {
+            shortname = hostname;
+        }
+
+        for (String member : members) {
+            // exact match (short or long names)
+            if (member.equals(hostname)) {
+                return true;
+            }
+
+            // the array has the short name and CloudStack has the long name
+            if (member.equals(shortname)) {
+                return true;
+            }
+
+            // the member has the long name but CloudStack has the short name
+            if (member.indexOf('.') > 0) {
+                if (member.substring(0, (member.indexOf('.'))).equals(shortname)) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    private String getAccessToken() {
+        refreshSession(false);
+        return accessToken;
+    }
+
+    private synchronized void refreshSession(boolean force) {
+        try {
+            if (force || keyExpiration < System.currentTimeMillis()) {
+                // close the client to force a connection reset on the appliance -- not doing this can
+                // result in NotAuthorized errors
+                _client.close();
+                _client = null;
+                login();
+                keyExpiration = System.currentTimeMillis() + keyTtl;
+            }
+        } catch (Exception e) {
+            // retry frequently but not on every request to avoid a DDOS on the storage API
+            logger.warn("Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds",
+                    e);
+            keyExpiration = System.currentTimeMillis() + (5 * 1000);
+        }
+    }
+
+    private void validateLoginInfo(String urlStr) {
+        URL urlFull;
+        try {
+            urlFull = new URL(urlStr);
+        } catch (MalformedURLException e) {
+            throw new RuntimeException("Invalid URL format: " + urlStr, e);
+        }
+
+        int port = urlFull.getPort();
+        if (port <= 0) {
+            port = 443;
+        }
+        this.url = urlFull.getProtocol() + "://" + urlFull.getHost() + ":" + port + urlFull.getPath();
+
+        Map<String, String> queryParms = new HashMap<>();
+        if (urlFull.getQuery() != null) {
+            String[] queryToks = urlFull.getQuery().split("&");
+            for (String tok : queryToks) {
+                if (tok.endsWith("=")) {
+                    continue;
+                }
+                int i = tok.indexOf("=");
+                if (i > 0) {
+                    queryParms.put(tok.substring(0, i), tok.substring(i + 1));
+                }
+            }
+        }
+
+        pod = connectionDetails.get(FlashArrayAdapter.STORAGE_POD);
+        if (pod == null) {
+            pod = queryParms.get(FlashArrayAdapter.STORAGE_POD);
+            if (pod == null) {
+                throw new RuntimeException(
+                        FlashArrayAdapter.STORAGE_POD + " parameter/option required to configure this storage pool");
+            }
+        }
+
+        hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP);
+        if (hostgroup == null) {
+            hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP);
+            if (hostgroup == null) {
+                throw new RuntimeException(
+                        FlashArrayAdapter.HOSTGROUP + " parameter/option required to configure this storage pool");
+            }
+        }
+
+        apiLoginVersion = connectionDetails.get(FlashArrayAdapter.API_LOGIN_VERSION);
+        if (apiLoginVersion == null) {
+            apiLoginVersion = queryParms.get(FlashArrayAdapter.API_LOGIN_VERSION);
+            if (apiLoginVersion == null) {
+                apiLoginVersion = API_LOGIN_VERSION_DEFAULT;
+            }
+        }
+
+        apiVersion = connectionDetails.get(FlashArrayAdapter.API_VERSION);
+        if (apiVersion == null) {
+            apiVersion = queryParms.get(FlashArrayAdapter.API_VERSION);
+            if (apiVersion == null) {
+                apiVersion = API_VERSION_DEFAULT;
+            }
+        }
+
+        String connTimeoutStr = connectionDetails.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS);
+        if (connTimeoutStr == null) {
+            connTimeoutStr = queryParms.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS);
+        }
+        if (connTimeoutStr == null) {
+            connTimeout = CONNECT_TIMEOUT_MS_DEFAULT;
+        } else {
+            try {
+                connTimeout = Integer.parseInt(connTimeoutStr);
+            } catch (NumberFormatException e) {
+                logger.warn("Connection timeout not formatted correctly, using default", e);
+                connTimeout = CONNECT_TIMEOUT_MS_DEFAULT;
+            }
+        }
+
+        String keyTtlString = connectionDetails.get(FlashArrayAdapter.KEY_TTL);
+        if (keyTtlString == null) {
+            keyTtlString = queryParms.get(FlashArrayAdapter.KEY_TTL);
+        }
+        if (keyTtlString == null) {
+            keyTtl = KEY_TTL_DEFAULT;
+        } else {
+            try {
+                keyTtl = Integer.parseInt(keyTtlString);
+            } catch (NumberFormatException e) {
+                logger.warn("Key TTL not formatted correctly, using default", e);
+                keyTtl = KEY_TTL_DEFAULT;
+            }
+        }
+
+        String copyWaitStr = connectionDetails.get(FlashArrayAdapter.POST_COPY_WAIT_MS);
+        if (copyWaitStr == null) {
+            copyWaitStr = queryParms.get(FlashArrayAdapter.POST_COPY_WAIT_MS);
+        }
+        if (copyWaitStr == null) {
+            postCopyWait = POST_COPY_WAIT_MS_DEFAULT;
+        } else {
+            try {
+                postCopyWait = Integer.parseInt(copyWaitStr);
+            } catch (NumberFormatException e) {
+                logger.warn("Post-copy wait not formatted correctly, using default", e);
+                postCopyWait = POST_COPY_WAIT_MS_DEFAULT;
+            }
+        }
+
+        String skipTlsValidationStr = connectionDetails.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY);
+        if (skipTlsValidationStr == null) {
+            skipTlsValidationStr = queryParms.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY);
+        }
+
+        if (skipTlsValidationStr != null) {
+            skipTlsValidation = Boolean.parseBoolean(skipTlsValidationStr);
+        } else {
+            skipTlsValidation = true;
+        }
+    }
+
+    /**
+     * Login to the array and get an access token
+     */
+    private void login() {
+        username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
+        password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
+        String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
+        validateLoginInfo(urlStr);
+        CloseableHttpResponse response = null;
+        try {
+            HttpPost request = new HttpPost(url + "/" + apiLoginVersion + "/auth/apitoken");
+            List<NameValuePair> postParms = new ArrayList<>();
+            postParms.add(new BasicNameValuePair("username", username));
+            postParms.add(new BasicNameValuePair("password", password));
+            request.setEntity(new UrlEncodedFormEntity(postParms, "UTF-8"));
+            CloseableHttpClient client = getClient();
+            response = (CloseableHttpResponse) client.execute(request);
+
+            int statusCode = response.getStatusLine().getStatusCode();
+            FlashArrayApiToken apitoken = null;
+            if (statusCode == 200 || statusCode == 201) {
+                apitoken = mapper.readValue(response.getEntity().getContent(), FlashArrayApiToken.class);
+                if (apitoken == null) {
+                    throw new CloudRuntimeException(
+                            "Authentication responded successfully but no api token was returned");
+                }
+            } else if (statusCode == 401 || statusCode == 403) {
+                throw new CloudRuntimeException(
+                        "Authentication or Authorization to FlashArray [" + url + "] with user [" + username +
+                                "] failed, unable to retrieve session token");
+            } else {
+                throw new CloudRuntimeException(
+                        "Unexpected HTTP response code from FlashArray [" + url + "] - [" + statusCode +
+                                "] - " + response.getStatusLine().getReasonPhrase());
+            }
+
+            // now we need to get the access token
+            request = new HttpPost(url + "/" + apiVersion + "/login");
+            request.addHeader("api-token", apitoken.getApiToken());
+            response = (CloseableHttpResponse) client.execute(request);
+
+            statusCode = response.getStatusLine().getStatusCode();
+            if (statusCode == 200 || statusCode == 201) {
+                Header[] headers = response.getHeaders("x-auth-token");
+                if (headers == null || headers.length == 0) {
+                    throw new CloudRuntimeException(
+                            "Getting access token responded successfully but access token was not available");
+                }
+                accessToken = headers[0].getValue();
+            } else if (statusCode == 401 || statusCode == 403) {
+                throw new CloudRuntimeException(
+                        "Authentication or Authorization to FlashArray [" + url + "] with user [" + username +
+                                "] failed, unable to retrieve session token");
+            } else {
+                throw new CloudRuntimeException(
+                        "Unexpected HTTP response code from FlashArray [" + url + "] - [" + statusCode +
+                                "] - " + response.getStatusLine().getReasonPhrase());
+            }
+
+        } catch (UnsupportedEncodingException e) {
+            throw new CloudRuntimeException("Error creating input for login, check username/password encoding");
+        } catch (UnsupportedOperationException e) {
+            throw new CloudRuntimeException("Error processing login response from FlashArray [" + url + "]", e);
+        } catch (IOException e) {
+            throw new CloudRuntimeException("Error sending login request to FlashArray [" + url + "]", e);
+        } finally {
+            try {
+                if (response != null) {
+                    response.close();
+                }
+            } catch (IOException e) {
+                logger.debug("Error closing response from login attempt to FlashArray", e);
+            }
+        }
+    }
+
+    private void removeVlunsAll(ProviderAdapterContext context, String volumeNamespace, String volumeName) {
+        volumeName = normalizeName(volumeNamespace, volumeName);
+        FlashArrayList<FlashArrayConnection> list = null;
+
+        try {
+            list = GET("/connections?volume_names=" + volumeName,
+                    new TypeReference<FlashArrayList<FlashArrayConnection>>() {
+                    });
+        } catch (CloudRuntimeException e) {
+            // this means the volume being deleted no longer exists so no connections can be
+            // searched
+            if (e.toString().contains("Bad Request")) {
+                return;
+            }
+        }
+
+        if (list != null && list.getItems() != null) {
+            for (FlashArrayConnection conn : list.getItems()) {
+                DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + volumeName);
+            }
+        }
+    }
+
+    private FlashArrayVolume getVolume(String volumeName) {
+        FlashArrayList<FlashArrayVolume> list = GET("/volumes?names=" + volumeName,
+                new TypeReference<FlashArrayList<FlashArrayVolume>>() {
+                });
+        return (FlashArrayVolume) getFlashArrayItem(list);
+    }
+
+    private FlashArrayPod getVolumeNamespace(String name) {
+        FlashArrayList<FlashArrayPod> list = GET("/pods?names=" + name, new TypeReference<FlashArrayList<FlashArrayPod>>() {
+        });
+        return (FlashArrayPod) getFlashArrayItem(list);
+    }
+
+    private FlashArrayHostgroup getHostgroup(String name) {
+        FlashArrayList<FlashArrayHostgroup> list = GET("/host-groups?names=" + name,
+                new TypeReference<FlashArrayList<FlashArrayHostgroup>>() {
+                });
+        return (FlashArrayHostgroup) getFlashArrayItem(list);
+    }
+
+    private List<String> getHostgroupMembers(String groupname) {
+        FlashArrayGroupMemberReferenceList list = GET("/hosts/host-groups?group_names=" + groupname,
+                new TypeReference<FlashArrayGroupMemberReferenceList>() {
+                });
+        if (list == null || list.getItems().size() == 0) {
+            return null;
+        }
+        List<String> hostnames = new ArrayList<>();
+        for (FlashArrayGroupMemberReference ref : list.getItems()) {
+            hostnames.add(ref.getMember().getName());
+        }
+        return hostnames;
+    }
+
+    private FlashArrayVolume getSnapshot(String snapshotName) {
+        FlashArrayList<FlashArrayVolume> list = GET("/volume-snapshots?names=" + snapshotName,
+                new TypeReference<FlashArrayList<FlashArrayVolume>>() {
+                });
+        return (FlashArrayVolume) getFlashArrayItem(list);
+    }
+
+    private Object getFlashArrayItem(FlashArrayList<?> list) {
+        if (list.getItems() !=
null && list.getItems().size() > 0) { + return list.getItems().get(0); + } else { + return null; + } + } + + private String normalizeName(String volumeNamespace, String volumeName) { + if (!volumeName.contains("::")) { + if (volumeNamespace != null) { + volumeName = volumeNamespace + "::" + volumeName; + } + } + return volumeName; + } + + @SuppressWarnings("unchecked") + private T POST(String path, Object input, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpPost request = new HttpPost(url + "/" + apiVersion + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-auth-token", getAccessToken()); + if (input != null) { + try { + String data = mapper.writeValueAsString(input); + request.setEntity(new StringEntity(data)); + } catch (UnsupportedEncodingException | JsonProcessingException e) { + throw new CloudRuntimeException( + "Error processing request payload to [" + url + "] for path [" + path + "]", e); + } + } + + CloseableHttpClient client = getClient(); + try { + response = (CloseableHttpResponse) client + .execute(request); + } catch (IOException e) { + throw new CloudRuntimeException("Error sending request to FlashArray [" + url + path + "]", e); + } + + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 201) { + try { + if (type != null) { + Header header = response.getFirstHeader("Location"); + if (type.getType().getTypeName().equals(String.class.getName())) { + if (header != null) { + return (T) header.getValue(); + } else { + return null; + } + } else { + return mapper.readValue(response.getEntity().getContent(), type); + } + } + return null; + } catch (UnsupportedOperationException | IOException e) { + throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", e); + } + } else if (statusCode == 400) { + try { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new CloudRuntimeException("Invalid request error 400: " + payload); + } catch (UnsupportedOperationException | IOException e) { + throw new CloudRuntimeException( + "Error processing bad request response from FlashArray [" + url + path + "]", e); + } + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + try { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new CloudRuntimeException("Invalid request error " + statusCode + ": " + payload); + } catch (UnsupportedOperationException | IOException e) { + throw new CloudRuntimeException( + "Unexpected HTTP response code from FlashArray on POST [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to FlashArray API", e); + } + } + } + } + + private T PATCH(String path, Object input, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpPatch request = new HttpPatch(url + "/" + apiVersion + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", 
"application/json"); + request.addHeader("X-auth-token", getAccessToken()); + String data = mapper.writeValueAsString(input); + request.setEntity(new StringEntity(data)); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 201) { + if (type != null) + return mapper.readValue(response.getEntity().getContent(), type); + return null; + } else if (statusCode == 400) { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new CloudRuntimeException("Invalid request error 400: " + payload); + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new CloudRuntimeException( + "Invalid request error from FlashArray on PUT [" + url + path + "]" + statusCode + ": " + + response.getStatusLine().getReasonPhrase() + " - " + payload); + } + } catch (UnsupportedEncodingException | JsonProcessingException e) { + throw new CloudRuntimeException( + "Error processing request payload to [" + url + "] for path [" + path + "]", e); + } catch (UnsupportedOperationException e) { + throw new CloudRuntimeException("Error processing bad request response from FlashArray [" + url + "]", + e); + } catch (IOException e) { + throw new CloudRuntimeException("Error sending request to FlashArray [" + url + "]", e); + + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to FlashArray API", e); + } + } + } + + } + + private T GET(String path, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpGet request = new HttpGet(url + "/" + apiVersion + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-auth-token", getAccessToken()); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200) { + try { + return mapper.readValue(response.getEntity().getContent(), type); + } catch (UnsupportedOperationException | IOException e) { + throw new CloudRuntimeException("Error processing response from FlashArray [" + url + "]", e); + } + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + throw new CloudRuntimeException( + "Unexpected HTTP response code from FlashArray on GET [" + request.getURI() + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (IOException e) { + throw new CloudRuntimeException("Error sending request to FlashArray [" + url + "]", e); + } catch (UnsupportedOperationException e) { + throw new CloudRuntimeException("Error processing response from FlashArray [" + url + "]", e); + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to 
FlashArray API", e); + } + } + } + } + + private void DELETE(String path) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpDelete request = new HttpDelete(url + "/" + apiVersion + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-auth-token", getAccessToken()); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 404 || statusCode == 400) { + // this means the volume was deleted successfully, or doesn't exist (effective + // delete), or + // the volume name is malformed or too long - meaning it never got created to + // begin with (effective delete) + return; + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else if (statusCode == 409) { + throw new CloudRuntimeException( + "The volume cannot be deleted at this time due to existing dependencies. Validate that all snapshots associated with this volume have been deleted and try again."); + } else { + throw new CloudRuntimeException( + "Unexpected HTTP response code from FlashArray on DELETE [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (IOException e) { + throw new CloudRuntimeException("Error sending request to FlashArray [" + url + "]", e); + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to FlashArray API", e); + } + } + } + } + + private CloseableHttpClient getClient() { + if (_client == null) { + RequestConfig config = RequestConfig.custom() + .setConnectTimeout((int) connTimeout) + .setConnectionRequestTimeout((int) connTimeout) + .setSocketTimeout((int) connTimeout).build(); + + HostnameVerifier verifier = null; + SSLContext sslContext = null; + + if (this.skipTlsValidation) { + try { + verifier = NoopHostnameVerifier.INSTANCE; + sslContext = new SSLContextBuilder().loadTrustMaterial(null, TrustAllStrategy.INSTANCE).build(); + } catch (KeyManagementException e) { + throw new CloudRuntimeException(e); + } catch (NoSuchAlgorithmException e) { + throw new CloudRuntimeException(e); + } catch (KeyStoreException e) { + throw new CloudRuntimeException(e); + } + } + + _client = HttpClients.custom() + .setDefaultRequestConfig(config) + .setSSLHostnameVerifier(verifier) + .setSSLContext(sslContext) + .build(); + } + return _client; + } + + /** + * pure array requires volume sizes in multiples of 512...we'll just round up to + * next 512 boundary + * + * @param sizeInBytes + * @return + */ + private Long roundUp512Boundary(Long sizeInBytes) { + Long remainder = sizeInBytes % 512; + if (remainder > 0) { + sizeInBytes = sizeInBytes + (512 - remainder); + } + return sizeInBytes; + } +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java new file mode 100644 index 00000000000..d1c3cee8fa8 --- /dev/null +++ 
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import java.util.Map; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; + +public class FlashArrayAdapterFactory implements ProviderAdapterFactory { + + @Override + public String getProviderName() { + return "Flash Array"; + } + + @Override + public ProviderAdapter create(String url, Map details) { + return new FlashArrayAdapter(url, details); + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayApiToken.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayApiToken.java new file mode 100644 index 00000000000..0f1e133cb5b --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayApiToken.java @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
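With the factory above in place, the factory map resolves adapters by provider name. A minimal usage sketch (the pool UUID, endpoint, credentials, pod, and host group values are made-up illustration values; note that getAPI() actually logs in to the array when first called):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
    import org.apache.cloudstack.storage.datastore.adapter.flasharray.FlashArrayAdapter;
    import org.apache.cloudstack.storage.datastore.adapter.flasharray.FlashArrayAdapterFactory;
    import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;

    public class FactoryMapUsageExample {
        public static void main(String[] args) {
            AdaptivePrimaryDatastoreAdapterFactoryMap factories = new AdaptivePrimaryDatastoreAdapterFactoryMap();
            factories.register(new FlashArrayAdapterFactory());

            Map<String, String> details = new HashMap<>();
            details.put(ProviderAdapter.API_URL_KEY, "https://array.example.com/api"); // assumed endpoint
            details.put(ProviderAdapter.API_USERNAME_KEY, "cloudstack");               // assumed credentials
            details.put(ProviderAdapter.API_PASSWORD_KEY, "secret");
            details.put(FlashArrayAdapter.STORAGE_POD, "cs-pod");                      // assumed pod name
            details.put(FlashArrayAdapter.HOSTGROUP, "cs-hostgroup");                  // assumed host group

            // looks up the "Flash Array" factory, creates and validates the adapter, then caches it
            ProviderAdapter api = factories.getAPI("pool-uuid-1234", "Flash Array", details);
        }
    }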
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayApiToken { + @JsonProperty("api_token") + private String apiToken; + public void setApiToken(String apiToken) { + this.apiToken = apiToken; + } + public String getApiToken() { + return apiToken; + } +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnection.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnection.java new file mode 100644 index 00000000000..76cec9f70c4 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnection.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
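The POJO above carries only the api_token field from the /auth/apitoken response; Jackson does the binding. A small self-contained sketch (the token value is made up):

    import com.fasterxml.jackson.databind.ObjectMapper;

    import org.apache.cloudstack.storage.datastore.adapter.flasharray.FlashArrayApiToken;

    public class ApiTokenBindingExample {
        public static void main(String[] args) throws Exception {
            // sample payload shaped like the /auth/apitoken response
            String json = "{\"api_token\": \"0123-4567-89ab\"}";
            FlashArrayApiToken token = new ObjectMapper().readValue(json, FlashArrayApiToken.class);
            System.out.println(token.getApiToken()); // prints 0123-4567-89ab
        }
    }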
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayConnection { + @JsonProperty("host_group") + private FlashArrayConnectionHostgroup hostGroup; + @JsonProperty("host") + private FlashArrayConnectionHost host; + @JsonProperty("volume") + private FlashArrayVolume volume; + @JsonProperty("lun") + private Integer lun; + + public FlashArrayConnectionHostgroup getHostGroup() { + return hostGroup; + } + + public void setHostGroup(FlashArrayConnectionHostgroup hostGroup) { + this.hostGroup = hostGroup; + } + + public FlashArrayConnectionHost getHost() { + return host; + } + + public void setHost(FlashArrayConnectionHost host) { + this.host = host; + } + + public FlashArrayVolume getVolume() { + return volume; + } + + public void setVolume(FlashArrayVolume volume) { + this.volume = volume; + } + + public Integer getLun() { + return lun; + } + + public void setLun(Integer lun) { + this.lun = lun; + } + + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHost.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHost.java new file mode 100644 index 00000000000..27dcf08ab21 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHost.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
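FlashArrayConnection, defined above, is what attach() and populateConnectionId() read the LUN from. A sketch of binding a sample /connections entry (field values are made up):

    import com.fasterxml.jackson.databind.ObjectMapper;

    import org.apache.cloudstack.storage.datastore.adapter.flasharray.FlashArrayConnection;

    public class ConnectionBindingExample {
        public static void main(String[] args) throws Exception {
            String json = "{\"host_group\": {\"name\": \"cs-hostgroup\"}, \"lun\": 7}";
            FlashArrayConnection conn = new ObjectMapper().readValue(json, FlashArrayConnection.class);
            // the adapter returns the lun as a string to the caller of attach()
            System.out.println(conn.getHostGroup().getName() + " -> LUN " + conn.getLun());
        }
    }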
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayConnectionHost { + @JsonProperty("name") + private String name; + public FlashArrayConnectionHost() {} + public FlashArrayConnectionHost(String name) { + this.name = name; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHostgroup.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHostgroup.java new file mode 100644 index 00000000000..27a0f60cbae --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHostgroup.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayConnectionHostgroup { + @JsonProperty("name") + private String name; + + public FlashArrayConnectionHostgroup() {} + public FlashArrayConnectionHostgroup(String name) { + this.name = name; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReference.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReference.java new file mode 100644 index 00000000000..f0f6d9e57fb --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReference.java @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayGroupMemberReference { + @JsonProperty("group") + private FlashArrayGroupNameWrapper group; + @JsonProperty("member") + private FlashArrayGroupMemberNameWrapper member; + + public static class FlashArrayGroupNameWrapper { + @JsonProperty("name") + private String name; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + public static class FlashArrayGroupMemberNameWrapper { + @JsonProperty("name") + private String name; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + public FlashArrayGroupNameWrapper getGroup() { + return group; + } + + public void setGroup(FlashArrayGroupNameWrapper group) { + this.group = group; + } + + public FlashArrayGroupMemberNameWrapper getMember() { + return member; + } + + public void setMember(FlashArrayGroupMemberNameWrapper member) { + this.member = member; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReferenceList.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReferenceList.java new file mode 100644 index 00000000000..b17c8a5b1f9 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReferenceList.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.flasharray;
+import java.util.ArrayList;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class FlashArrayGroupMemberReferenceList {
+    @JsonProperty("items")
+    private ArrayList<FlashArrayGroupMemberReference> items;
+
+    public ArrayList<FlashArrayGroupMemberReference> getItems() {
+        return items;
+    }
+
+    public void setItems(ArrayList<FlashArrayGroupMemberReference> items) {
+        this.items = items;
+    }
+
+}
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHostgroup.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHostgroup.java
new file mode 100644
index 00000000000..1a2e3911e24
--- /dev/null
+++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHostgroup.java
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
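The nested name wrappers above mirror the group/member JSON shape the array uses for host-group membership operations. A short sketch of building one reference by hand (names hypothetical), which serializes to {"group":{"name":"kvm-cluster-1"},"member":{"name":"kvm-host-01"}}:

    FlashArrayGroupMemberReference ref = new FlashArrayGroupMemberReference();
    FlashArrayGroupMemberReference.FlashArrayGroupNameWrapper group =
            new FlashArrayGroupMemberReference.FlashArrayGroupNameWrapper();
    group.setName("kvm-cluster-1");   // hypothetical host group
    FlashArrayGroupMemberReference.FlashArrayGroupMemberNameWrapper member =
            new FlashArrayGroupMemberReference.FlashArrayGroupMemberNameWrapper();
    member.setName("kvm-host-01");    // hypothetical host
    ref.setGroup(group);
    ref.setMember(member);
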
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayHostgroup { + @JsonProperty("name") + private String name; + @JsonProperty("connection_count") + private Long connectionCount; + @JsonProperty("host_count") + private Long hostCount; + @JsonProperty("is_local") + private Boolean local; + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public Long getConnectionCount() { + return connectionCount; + } + public void setConnectionCount(Long connectionCount) { + this.connectionCount = connectionCount; + } + public Long getHostCount() { + return hostCount; + } + public void setHostCount(Long hostCount) { + this.hostCount = hostCount; + } + public Boolean getLocal() { + return local; + } + public void setLocal(Boolean local) { + this.local = local; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayList.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayList.java new file mode 100644 index 00000000000..992c3fc8b67 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayList.java @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.flasharray;
+import java.util.List;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class FlashArrayList<T> {
+    @JsonProperty("more_items_remaining")
+    private Boolean moreItemsRemaining;
+    @JsonProperty("total_item_count")
+    private Integer totalItemCount;
+    @JsonProperty("continuation_token")
+    private String continuationToken;
+    @JsonProperty("items")
+    private List<T> items;
+    public Boolean getMoreItemsRemaining() {
+        return moreItemsRemaining;
+    }
+    public void setMoreItemsRemaining(Boolean moreItemsRemaining) {
+        this.moreItemsRemaining = moreItemsRemaining;
+    }
+    public Integer getTotalItemCount() {
+        return totalItemCount;
+    }
+    public void setTotalItemCount(Integer totalItemCount) {
+        this.totalItemCount = totalItemCount;
+    }
+    public String getContinuationToken() {
+        return continuationToken;
+    }
+    public void setContinuationToken(String continuationToken) {
+        this.continuationToken = continuationToken;
+    }
+    public List<T> getItems() {
+        return items;
+    }
+    public void setItems(List<T> items) {
+        this.items = items;
+    }
+
+}
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayPod.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayPod.java
new file mode 100644
index 00000000000..ddbfc298df4
--- /dev/null
+++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayPod.java
@@ -0,0 +1,66 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
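FlashArrayList carries the standard Pure REST paging fields (total_item_count, more_items_remaining, continuation_token), so a caller that wants every item must loop until the array reports no remainder. A minimal paging sketch; fetchPage() is a hypothetical helper standing in for a GET of /volumes with an optional continuation_token query parameter:

    static List<FlashArrayVolume> listAllVolumes() {
        List<FlashArrayVolume> all = new ArrayList<>();
        String token = null;
        FlashArrayList<FlashArrayVolume> page;
        do {
            page = fetchPage(token);                // hypothetical REST call
            if (page.getItems() != null) {
                all.addAll(page.getItems());
            }
            token = page.getContinuationToken();
        } while (Boolean.TRUE.equals(page.getMoreItemsRemaining()));
        return all;
    }
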
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayPod { + @JsonProperty("name") + private String name; + @JsonProperty("id") + private String id; + @JsonProperty("destroyed") + private Boolean destroyed; + @JsonProperty("footprint") + private Long footprint; + @JsonProperty("quota_limit") + private Long quotaLimit; + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getId() { + return id; + } + public void setId(String id) { + this.id = id; + } + public Boolean getDestroyed() { + return destroyed; + } + public void setDestroyed(Boolean destroyed) { + this.destroyed = destroyed; + } + public Long getFootprint() { + return footprint; + } + public void setFootprint(Long footprint) { + this.footprint = footprint; + } + public Long getQuotaLimit() { + return quotaLimit; + } + public void setQuotaLimit(Long quotaLimit) { + this.quotaLimit = quotaLimit; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTag.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTag.java new file mode 100644 index 00000000000..685d4e1f1cf --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTag.java @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
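The pod's quota_limit and footprint fields are the natural inputs for capacity reporting; whether the driver derives its storage stats exactly this way is an assumption, since the adapter class is outside this hunk. The arithmetic would look like:

    FlashArrayPod pod = new FlashArrayPod();      // normally deserialized from GET /pods/<name>
    pod.setQuotaLimit(10L << 40);                 // hypothetical 10 TiB quota
    pod.setFootprint(2L << 40);                   // hypothetical 2 TiB consumed
    long capacityBytes = pod.getQuotaLimit();     // pod quota as usable capacity
    long usedBytes = pod.getFootprint();          // physical footprint as used space
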
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayTag { + @JsonProperty("copyable") + private Boolean copyable; + @JsonProperty("key") + private String key; + @JsonProperty("namespace") + private String namespace; + @JsonProperty("value") + private String value; + + public FlashArrayTag() { + + } + + public FlashArrayTag(String namespace, String key, String value) { + this.key = key; + this.namespace = namespace; + this.value = value; + } + + public Boolean getCopyable() { + return copyable; + } + + public void setCopyable(Boolean copyable) { + this.copyable = copyable; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getNamespace() { + return namespace; + } + + public void setNamespace(String namespace) { + this.namespace = namespace; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTagList.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTagList.java new file mode 100644 index 00000000000..7a23343a647 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTagList.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.flasharray;
+
+import java.util.List;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class FlashArrayTagList {
+    @JsonProperty("tags")
+    public List<FlashArrayTag> tags;
+
+    public List<FlashArrayTag> getTags() {
+        return tags;
+    }
+
+    public void setTags(List<FlashArrayTag> tags) {
+        this.tags = tags;
+    }
+
+}
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java
new file mode 100644
index 00000000000..f939d70a77f
--- /dev/null
+++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java
@@ -0,0 +1,253 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
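Tags give the driver a place to stamp array-side volumes with CloudStack metadata under a dedicated namespace. A sketch of the {"tags":[...]} payload this DTO produces (namespace, key, and value are hypothetical):

    FlashArrayTagList tagList = new FlashArrayTagList();
    tagList.setTags(java.util.Arrays.asList(
            new FlashArrayTag("cloudstack", "cs-volume-id", "vol-1234")));
    // Serializes as:
    // {"tags":[{"key":"cs-volume-id","namespace":"cloudstack","value":"vol-1234"}]}
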
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayVolume implements ProviderSnapshot { + public static final String PURE_OUI = "24a9370"; + + @JsonProperty("destroyed") + private Boolean destroyed; + /** The virtual size requested for this volume */ + @JsonProperty("provisioned") + private Long allocatedSizeBytes; + @JsonIgnore + private String id; + @JsonIgnore // we don't use the Cloudstack user name at all + private String name; + @JsonIgnore + private String shortExternalName; + @JsonProperty("pod") + private FlashArrayVolumePod pod; + @JsonProperty("priority") + private Integer priority; + @JsonProperty("promotion_status") + private String promotionStatus; + @JsonProperty("subtype") + private String subtype; + @JsonProperty("space") + private FlashArrayVolumeSpace space; + @JsonProperty("source") + private FlashArrayVolumeSource source; + @JsonProperty("serial") + private String serial; + @JsonProperty("name") + private String externalName; + @JsonProperty("id") + private String externalUuid; + @JsonIgnore + private AddressType addressType; + @JsonIgnore + private String connectionId; + + public FlashArrayVolume() { + this.addressType = AddressType.FIBERWWN; + } + + @Override + public Boolean isDestroyed() { + return destroyed; + } + @Override + @JsonIgnore + public String getId() { + return id; + } + @Override + @JsonIgnore + public String getName() { + return name; + } + @JsonIgnore + public String getPodName() { + if (pod != null) { + return pod.getName(); + } else { + return null; + } + } + @Override + @JsonIgnore + public Integer getPriority() { + return priority; + } + @Override + @JsonIgnore + public String getState() { + return null; + } + @Override + @JsonIgnore + public AddressType getAddressType() { + return addressType; + } + @Override + @JsonIgnore + public String getAddress() { + if (serial == null) return null; + return ("6" + PURE_OUI + serial).toLowerCase(); + } + @Override + public String getExternalConnectionId() { + return connectionId; + } + + @JsonIgnore + public void setExternalConnectionId(String externalConnectionId) { + this.connectionId = externalConnectionId; + } + + @Override + public void setId(String id) { + this.id = id; + } + @Override + public void setName(String name) { + this.name = name; + } + public void setPodName(String podname) { + FlashArrayVolumePod pod = new FlashArrayVolumePod(); + pod.setName(podname); + this.pod = pod; + } + @Override + public void setPriority(Integer priority) { + this.priority = priority; + } + @Override + public void setAddressType(AddressType addressType) { + this.addressType = addressType; + } + @Override + @JsonIgnore + public Long getAllocatedSizeInBytes() { + return this.allocatedSizeBytes; + } + public void setAllocatedSizeBytes(Long size) { + this.allocatedSizeBytes = size; + } + @Override + @JsonIgnore + public Long getUsedBytes() { + if (space != null) { + return space.getVirtual(); + } else { + return null; + } + } + + public void setDestroyed(Boolean destroyed) { + this.destroyed = destroyed; + } + public FlashArrayVolumeSource getSource() { + return source; + } + public void 
setSource(FlashArrayVolumeSource source) { + this.source = source; + } + @Override + public String getExternalUuid() { + return externalUuid; + } + @Override + public String getExternalName() { + return externalName; + } + + public void setExternalUuid(String uuid) { + this.externalUuid = uuid; + } + + public void setExternalName(String name) { + this.externalName = name; + } + @Override + public Boolean canAttachDirectly() { + return false; + } + public String getConnectionId() { + return connectionId; + } + public void setConnectionId(String connectionId) { + this.connectionId = connectionId; + } + + public Boolean getDestroyed() { + return destroyed; + } + + public Long getAllocatedSizeBytes() { + return allocatedSizeBytes; + } + + public String getShortExternalName() { + return shortExternalName; + } + + public void setShortExternalName(String shortExternalName) { + this.shortExternalName = shortExternalName; + } + + public FlashArrayVolumePod getPod() { + return pod; + } + + public void setPod(FlashArrayVolumePod pod) { + this.pod = pod; + } + + public String getPromotionStatus() { + return promotionStatus; + } + + public void setPromotionStatus(String promotionStatus) { + this.promotionStatus = promotionStatus; + } + + public String getSubtype() { + return subtype; + } + + public void setSubtype(String subtype) { + this.subtype = subtype; + } + + public FlashArrayVolumeSpace getSpace() { + return space; + } + + public void setSpace(FlashArrayVolumeSpace space) { + this.space = space; + } + + public String getSerial() { + return serial; + } + + public void setSerial(String serial) { + this.serial = serial; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java new file mode 100644 index 00000000000..1e46441e7d1 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java @@ -0,0 +1,43 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
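getAddress() above is how a mapped volume is located on the hypervisor: the array's 24-hex-digit volume serial is wrapped into an NAA-6 identifier using Pure's OUI. A worked sketch with a hypothetical serial:

    String serial = "F4D9236DD2E549EC000114A2";   // hypothetical volume serial
    String wwn = ("6" + FlashArrayVolume.PURE_OUI + serial).toLowerCase();
    // wwn == "624a9370f4d9236dd2e549ec000114a2"; on a KVM host the LUN then
    // appears as /dev/disk/by-id/wwn-0x624a9370f4d9236dd2e549ec000114a2
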
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayVolumePod { + @JsonProperty("id") + private String id; + @JsonProperty("name") + private String name; + + public String getId() { + return id; + } + public void setId(String id) { + this.id = id; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSource.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSource.java new file mode 100644 index 00000000000..9bc8dec0016 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSource.java @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayVolumeSource { + @JsonProperty("id") + private String id; + @JsonProperty("name") + private String name; + public FlashArrayVolumeSource() { } + public FlashArrayVolumeSource(String sourceName) { + this.name = sourceName; + } + public String getId() { + return id; + } + public void setId(String id) { + this.id = id; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSpace.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSpace.java new file mode 100644 index 00000000000..95e148ce89f --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSpace.java @@ -0,0 +1,122 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayVolumeSpace { + @JsonProperty("data_reduction") + private Float dataReduction; + @JsonProperty("snapshots") + private Integer snapshots; + @JsonProperty("snapshots_effective") + private Integer snapshotsEffective; + @JsonProperty("thin_provisioning") + private Float thinProvisioning; + @JsonProperty("total_effective") + private Long totalEffective; + @JsonProperty("total_physical") + private Long totalPhysical; + @JsonProperty("total_provisioned") + private Long totalProvisioned; + @JsonProperty("total_reduction") + private Float totalReduction; + @JsonProperty("unique") + private Long unique; + @JsonProperty("unique_effective") + private Long uniqueEffective; + @JsonProperty("user_provisioned") + private Long usedProvisioned; + @JsonProperty("virtual") + private Long virtual; + public Float getData_reduction() { + return dataReduction; + } + public void setData_reduction(Float dataReduction) { + this.dataReduction = dataReduction; + } + public Integer getSnapshots() { + return snapshots; + } + public void setSnapshots(Integer snapshots) { + this.snapshots = snapshots; + } + public Integer getSnapshotsEffective() { + return snapshotsEffective; + } + public void setSnapshotsEffective(Integer snapshotsEffective) { + this.snapshotsEffective = snapshotsEffective; + } + public Float getThinProvisioning() { + return thinProvisioning; + } + public void setThinProvisioning(Float thinProvisioning) { + this.thinProvisioning = thinProvisioning; + } + public Long getTotalEffective() { + return totalEffective; + } + public void setTotalEffective(Long totalEffective) { + this.totalEffective = totalEffective; + } + public Long getTotalPhysical() { + return totalPhysical; + } + public void setTotal_physical(Long totalPhysical) { + this.totalPhysical = totalPhysical; + } + public Long getTotalProvisioned() { + return totalProvisioned; + } + public void setTotalProvisioned(Long totalProvisioned) { + this.totalProvisioned = totalProvisioned; + } + public Float getTotalReduction() { + return totalReduction; + } + public void setTotalReduction(Float totalReduction) { + this.totalReduction = totalReduction; + } + public Long getUnique() { + return unique; + } + public void setUnique(Long unique) { + this.unique = unique; + } + public Long getUniqueEffective() { + return uniqueEffective; + } + public void setUniqueEffective(Long uniqueEffective) { + this.uniqueEffective = uniqueEffective; + } + public Long getUsedProvisioned() { + return usedProvisioned; + } + public void setUsed_provisioned(Long usedProvisioned) { + this.usedProvisioned = usedProvisioned; + } + public Long 
getVirtual() { + return virtual; + } + public void setVirtual(Long virtual) { + this.virtual = virtual; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/provider/FlashArrayPrimaryDatastoreProviderImpl.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/provider/FlashArrayPrimaryDatastoreProviderImpl.java new file mode 100644 index 00000000000..0750ef2cc27 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/provider/FlashArrayPrimaryDatastoreProviderImpl.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.provider; + +import org.apache.cloudstack.storage.datastore.adapter.flasharray.FlashArrayAdapterFactory; + +public class FlashArrayPrimaryDatastoreProviderImpl extends AdaptivePrimaryDatastoreProviderImpl { + + public FlashArrayPrimaryDatastoreProviderImpl() { + super(new FlashArrayAdapterFactory()); + } + + @Override + public String getName() { + return "Flash Array"; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/module.properties b/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/module.properties new file mode 100644 index 00000000000..ac3c1e20b08 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
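FlashArrayPrimaryDatastoreProviderImpl above is the entire vendor-specific surface of the adaptive framework: a subclass supplies a ProviderAdapterFactory to the base constructor plus a display name, and AdaptivePrimaryDatastoreProviderImpl drives the rest of the provider lifecycle. A hypothetical second vendor would plug in the same way (AcmeArrayAdapterFactory is invented for illustration):

    // Hypothetical, not part of this patch; AcmeArrayAdapterFactory would
    // implement the same factory contract FlashArrayAdapterFactory does.
    public class AcmeArrayPrimaryDatastoreProviderImpl extends AdaptivePrimaryDatastoreProviderImpl {
        public AcmeArrayPrimaryDatastoreProviderImpl() {
            super(new AcmeArrayAdapterFactory());
        }

        @Override
        public String getName() {
            return "Acme Array";   // the provider name operators select when adding primary storage
        }
    }
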
+name=storage-volume-flasharray
+parent=storage
diff --git a/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/spring-storage-volume-flasharray-context.xml b/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/spring-storage-volume-flasharray-context.xml
new file mode 100644
index 00000000000..030e9def26d
--- /dev/null
+++ b/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/spring-storage-volume-flasharray-context.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied. See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+                           http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+    <bean id="flashArrayDataStoreProvider"
+          class="org.apache.cloudstack.storage.datastore.provider.FlashArrayPrimaryDatastoreProviderImpl" />
+
+</beans>
diff --git a/plugins/storage/volume/primera/pom.xml b/plugins/storage/volume/primera/pom.xml
new file mode 100644
index 00000000000..da345eeb173
--- /dev/null
+++ b/plugins/storage/volume/primera/pom.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied. See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>cloud-plugin-storage-volume-primera</artifactId>
+    <name>Apache CloudStack Plugin - Storage Volume - HPE Primera</name>
+    <parent>
+        <groupId>org.apache.cloudstack</groupId>
+        <artifactId>cloudstack-plugins</artifactId>
+        <version>4.19.0.0-SNAPSHOT</version>
+        <relativePath>../../../pom.xml</relativePath>
+    </parent>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java
new file mode 100644
index 00000000000..69f98567f72
--- /dev/null
+++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java
@@ -0,0 +1,930 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume.AddressType; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeNamer; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering.ProvisioningType; +import org.apache.http.Header; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.TrustAllStrategy; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; +import org.apache.log4j.Logger; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; + +public class PrimeraAdapter implements ProviderAdapter { + + static final Logger logger = Logger.getLogger(PrimeraAdapter.class); + + public static final String HOSTSET = "hostset"; + public static final String CPG = "cpg"; + public static final String SNAP_CPG = "snapCpg"; + public static final String KEY_TTL = "keyttl"; + public static final String CONNECT_TIMEOUT_MS = "connectTimeoutMs"; + public static final String POST_COPY_WAIT_MS = "postCopyWaitMs"; + public static final String TASK_WAIT_TIMEOUT_MS = "taskWaitTimeoutMs"; + + private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14); + private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000; + private static final long TASK_WAIT_TIMEOUT_MS_DEFAULT = 10 * 60 * 1000; + public static final long BYTES_IN_MiB = 1048576; + + static final ObjectMapper mapper = new ObjectMapper(); + public String cpg = null; + public String snapCpg = null; + public String hostset = null; + private String username; + private String password; + private String key; + private String url; + private long keyExpiration = -1; + private long keyTtl = KEY_TTL_DEFAULT; + private long connTimeout = CONNECT_TIMEOUT_MS_DEFAULT; + private long taskWaitTimeoutMs = TASK_WAIT_TIMEOUT_MS_DEFAULT; + private CloseableHttpClient _client = null; + private boolean 
skipTlsValidation;
+
+    private Map<String, String> connectionDetails = null;
+
+    public PrimeraAdapter(String url, Map<String, String> details) {
+        this.url = url;
+        this.connectionDetails = details;
+        login();
+    }
+
+    @Override
+    public void refresh(Map<String, String> details) {
+        this.connectionDetails = details;
+        this.refreshSession(true);
+    }
+
+    /**
+     * Validate that the hostset and CPG from the details data exist. Each
+     * configuration object/connection needs a distinct set of these 2 things.
+     */
+    @Override
+    public void validate() {
+        login();
+        if (this.getHostset(hostset) == null) {
+            throw new RuntimeException("Hostset [" + hostset + "] not found in Primera array at [" + url +
+                "], please validate configuration");
+        }
+
+        if (this.getCpg(cpg) == null) {
+            throw new RuntimeException(
+                    "CPG [" + cpg + "] not found in Primera array at [" + url + "], please validate configuration");
+        }
+    }
+
+    @Override
+    public void disconnect() {
+        return;
+    }
+
+    @Override
+    public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataIn,
+            ProviderAdapterDiskOffering diskOffering, long sizeInBytes) {
+        PrimeraVolumeRequest request = new PrimeraVolumeRequest();
+        String externalName = ProviderVolumeNamer.generateObjectName(context, dataIn);
+        request.setName(externalName);
+        request.setCpg(cpg);
+        request.setSnapCPG(snapCpg);
+        if (sizeInBytes < BYTES_IN_MiB) {
+            request.setSizeMiB(1);
+        } else {
+            request.setSizeMiB(sizeInBytes / BYTES_IN_MiB);
+        }
+
+        // determine volume type based on offering
+        // THIN: tpvv=true, reduce=false
+        // SPARSE: tpvv=false, reduce=true
+        // THICK: tpvv=false, tpZeroFill=true (not supported)
+        if (diskOffering != null) {
+            if (diskOffering.getType() == ProvisioningType.THIN) {
+                request.setTpvv(true);
+                request.setReduce(false);
+            } else if (diskOffering.getType() == ProvisioningType.SPARSE) {
+                request.setTpvv(false);
+                request.setReduce(true);
+            } else if (diskOffering.getType() == ProvisioningType.FAT) {
+                throw new RuntimeException("This storage provider does not support FAT provisioned volumes");
+            }
+
+            // sets the amount of space allowed for snapshots as a % of the volume's size
+            if (diskOffering.getHypervisorSnapshotReserve() != null) {
+                request.setSsSpcAllocLimitPct(diskOffering.getHypervisorSnapshotReserve());
+            }
+        } else {
+            // default to deduplicated volume
+            request.setReduce(true);
+            request.setTpvv(false);
+        }
+
+        request.setComment(ProviderVolumeNamer.generateObjectComment(context, dataIn));
+        POST("/volumes", request, null);
+        dataIn.setExternalName(externalName);
+        ProviderVolume volume = getVolume(context, dataIn);
+        return volume;
+    }
+
+    @Override
+    public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) {
+        assert dataIn.getExternalName() != null : "External name not provided internally on volume attach";
+        PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest();
+        request.setHostname("set:" + hostset);
+        request.setVolumeName(dataIn.getExternalName());
+        request.setAutoLun(true);
+        // auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4
+        String location = POST("/vluns", request, new TypeReference<String>() {});
+        if (location == null) {
+            throw new RuntimeException("Attach volume failed with empty location response to vlun add command on storage provider");
+        }
+        String[] toks = location.split(",");
+        if (toks.length < 2) {
+            throw new RuntimeException("Attach volume failed with invalid location response to vlun add command on storage provider.
Provided location: " + location); + } + return toks[1]; + } + + @Override + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) { + // we expect to only be attaching one hostset to the vluns, so on detach we'll + // remove ALL vluns we find. + assert request.getExternalName() != null : "External name not provided internally on volume detach"; + removeAllVluns(request.getExternalName()); + } + + public void removeVlun(String name, Integer lunid, String hostString) { + // hostString can be a hostname OR "set:". It is stored this way + // in the appliance and returned as the vlun's name/string. + DELETE("/vluns/" + name + "," + lunid + "," + hostString); + } + + /** + * Removes all vluns - this should only be done when you are sure the volume is no longer in use + * @param name + */ + public void removeAllVluns(String name) { + PrimeraVlunList list = getVolumeHostsets(name); + if (list != null && list.getMembers() != null) { + for (PrimeraVlun vlun: list.getMembers()) { + removeVlun(vlun.getVolumeName(), vlun.getLun(), vlun.getHostname()); + } + } + } + + public PrimeraVlunList getVolumeHostsets(String name) { + String query = "%22volumeName%20EQ%20" + name + "%22"; + return GET("/vluns?query=" + query, new TypeReference() {}); + } + + @Override + public void delete(ProviderAdapterContext context, ProviderAdapterDataObject request) { + assert request.getExternalName() != null : "External name not provided internally on volume delete"; + + // first remove vluns (take volumes from vluns) from hostset + removeAllVluns(request.getExternalName()); + DELETE("/volumes/" + request.getExternalName()); + } + + @Override + public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolumeInfo, + ProviderAdapterDataObject targetVolumeInfo) { + PrimeraVolumeCopyRequest request = new PrimeraVolumeCopyRequest(); + PrimeraVolumeCopyRequestParameters parms = new PrimeraVolumeCopyRequestParameters(); + + assert sourceVolumeInfo.getExternalName() != null: "External provider name not provided on copy request to Primera volume provider"; + + // if we have no external name, treat it as a new volume + if (targetVolumeInfo.getExternalName() == null) { + targetVolumeInfo.setExternalName(ProviderVolumeNamer.generateObjectName(context, targetVolumeInfo)); + } + + ProviderVolume sourceVolume = this.getVolume(context, sourceVolumeInfo); + if (sourceVolume == null) { + throw new RuntimeException("Source volume " + sourceVolumeInfo.getExternalUuid() + " with provider name " + sourceVolumeInfo.getExternalName() + " not found on storage provider"); + } + + ProviderVolume targetVolume = this.getVolume(context, targetVolumeInfo); + if (targetVolume == null) { + this.create(context, targetVolumeInfo, null, sourceVolume.getAllocatedSizeInBytes()); + } + + parms.setDestVolume(targetVolumeInfo.getExternalName()); + parms.setOnline(false); + request.setParameters(parms); + + PrimeraTaskReference taskref = POST("/volumes/" + sourceVolumeInfo.getExternalName(), request, new TypeReference() {}); + if (taskref == null) { + throw new RuntimeException("Unable to retrieve task used to copy to newly created volume"); + } + + waitForTaskToComplete(taskref.getTaskid(), "copy volume " + sourceVolumeInfo.getExternalName() + " to " + + targetVolumeInfo.getExternalName(), taskWaitTimeoutMs); + + return this.getVolume(context, targetVolumeInfo); + } + + private void waitForTaskToComplete(String taskid, String taskDescription, Long timeoutMs) { + // first wait for task to complete + 
long taskWaitTimeout = System.currentTimeMillis() + timeoutMs; + boolean timedOut = true; + PrimeraTaskStatus status = null; + long starttime = System.currentTimeMillis(); + while (System.currentTimeMillis() <= taskWaitTimeout) { + status = this.getTaskStatus(taskid); + if (status != null && status.isFinished()) { + timedOut = false; + if (!status.isSuccess()) { + throw new RuntimeException("Task " + taskDescription + " was cancelled. TaskID: " + status.getId() + "; Final Status: " + status.getStatusName()); + } + break; + } else { + if (status != null) { + logger.info("Task " + taskDescription + " is still running. TaskID: " + status.getId() + "; Current Status: " + status.getStatusName()); + } + // ugly...to keep from hot-polling API + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + + } + } + } + + if (timedOut) { + if (status != null) { + throw new RuntimeException("Task " + taskDescription + " timed out. TaskID: " + status.getId() + ", Last Known Status: " + status.getStatusName()); + } else { + throw new RuntimeException("Task " + taskDescription + " timed out and a current status could not be retrieved from storage endpoint"); + } + } + + logger.info(taskDescription + " completed in " + ((System.currentTimeMillis() - starttime)/1000) + " seconds"); + } + + private PrimeraTaskStatus getTaskStatus(String taskid) { + return GET("/tasks/" + taskid + "?view=excludeDetail", new TypeReference() { + }); + } + + @Override + public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, + ProviderAdapterDataObject targetSnapshot) { + assert sourceVolume.getExternalName() != null : "External name not set"; + PrimeraVolumeSnapshotRequest request = new PrimeraVolumeSnapshotRequest(); + PrimeraVolumeSnapshotRequestParameters parms = new PrimeraVolumeSnapshotRequestParameters(); + parms.setName(ProviderVolumeNamer.generateObjectName(context, targetSnapshot)); + request.setParameters(parms); + POST("/volumes/" + sourceVolume.getExternalName(), request, null); + targetSnapshot.setExternalName(parms.getName()); + return getSnapshot(context, targetSnapshot); + } + + @Override + public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) { + assert dataIn.getExternalName() != null: "External name not internally set for provided snapshot when requested storage provider to revert"; + // first get original volume + PrimeraVolume snapVol = (PrimeraVolume)getVolume(context, dataIn); + assert snapVol != null: "Storage volume associated with snapshot externally named [" + dataIn.getExternalName() + "] not found"; + assert snapVol.getParentId() != null: "Unable to determine parent volume/snapshot for snapshot named [" + dataIn.getExternalName() + "]"; + + PrimeraVolumeRevertSnapshotRequest request = new PrimeraVolumeRevertSnapshotRequest(); + request.setOnline(true); + request.setPriority(2); + PrimeraTaskReference taskref = PUT("/volumes/" + dataIn.getExternalName(), request, new TypeReference() {}); + if (taskref == null) { + throw new RuntimeException("Unable to retrieve task used to revert snapshot to base volume"); + } + + waitForTaskToComplete(taskref.getTaskid(), "revert snapshot " + dataIn.getExternalName(), taskWaitTimeoutMs); + + return getVolumeById(context, snapVol.getParentId()); + } + + /** + * Resize the volume to the new size. 
For HPE Primera, the API takes the additional space to add to the volume + * so this method will first retrieve the current volume's size and subtract that from the new size provided + * before calling the API. + * + * This method uses option GROW_VOLUME=3 for the API at this URL: + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v25706371.html + * + */ + @Override + public void resize(ProviderAdapterContext context, ProviderAdapterDataObject request, long totalNewSizeInBytes) { + assert request.getExternalName() != null: "External name not internally set for provided volume when requesting resize of volume"; + + PrimeraVolume existingVolume = (PrimeraVolume) getVolume(context, request); + assert existingVolume != null: "Storage volume resize request not possible as existing volume not found for external provider name: " + request.getExternalName(); + long existingSizeInBytes = existingVolume.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB; + assert existingSizeInBytes < totalNewSizeInBytes: "Existing volume size is larger than requested new size for volume resize request. The Primera storage system does not support truncating/shrinking volumes."; + long addOnSizeInBytes = totalNewSizeInBytes - existingSizeInBytes; + + PrimeraVolume volume = new PrimeraVolume(); + volume.setSizeMiB((int) (addOnSizeInBytes / PrimeraAdapter.BYTES_IN_MiB)); + volume.setAction(3); + PUT("/volumes/" + request.getExternalName(), volume, null); + } + + @Override + public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject request) { + String externalName; + + // if the external name isn't provided, look for the derived contextual name. some failure scenarios + // may result in the volume for this context being created but a subsequent failure causing the external + // name to not be persisted for later use. 
This is true of template-type objects being cached on primary + // storage + if (request.getExternalName() == null) { + externalName = ProviderVolumeNamer.generateObjectName(context, request); + } else { + externalName = request.getExternalName(); + } + + return GET("/volumes/" + externalName, new TypeReference() { + }); + } + + private ProviderVolume getVolumeById(ProviderAdapterContext context, Integer id) { + String query = "%22id%20EQ%20" + id + "%22"; + return GET("/volumes?query=" + query, new TypeReference() {}); + } + + @Override + public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject request) { + assert request.getExternalName() != null: "External name not provided internally when finding snapshot on storage provider"; + return GET("/volumes/" + request.getExternalName(), new TypeReference() { + }); + } + + @Override + public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) { + assert address != null: "External volume address not provided"; + assert AddressType.FIBERWWN.equals(addressType): "This volume provider currently does not support address type " + addressType.name(); + String query = "%22wwn%20EQ%20" + address + "%22"; + return GET("/volumes?query=" + query, new TypeReference() {}); + } + + @Override + public ProviderVolumeStorageStats getManagedStorageStats() { + PrimeraCpg cpgobj = getCpg(cpg); + // just in case + if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) { + return null; + } + Long capacityBytes = 0L; + if (cpgobj.getsDGrowth() != null) { + capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB; + } + Long usedBytes = 0L; + if (cpgobj.getUsrUsage() != null) { + usedBytes = (cpgobj.getUsrUsage().getRawUsedMiB()) * PrimeraAdapter.BYTES_IN_MiB; + } + ProviderVolumeStorageStats stats = new ProviderVolumeStorageStats(); + stats.setActualUsedInBytes(usedBytes); + stats.setCapacityInBytes(capacityBytes); + return stats; + } + + @Override + public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject request) { + PrimeraVolume vol = (PrimeraVolume)getVolume(context, request); + if (vol == null || vol.getSizeMiB() == null || vol.getSizeMiB() == 0) { + return null; + } + + Long virtualSizeInBytes = vol.getHostWriteMiB() * PrimeraAdapter.BYTES_IN_MiB; + Long allocatedSizeInBytes = vol.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB; + Long actualUsedInBytes = vol.getTotalUsedMiB() * PrimeraAdapter.BYTES_IN_MiB; + ProviderVolumeStats stats = new ProviderVolumeStats(); + stats.setActualUsedInBytes(actualUsedInBytes); + stats.setAllocatedInBytes(allocatedSizeInBytes); + stats.setVirtualUsedInBytes(virtualSizeInBytes); + return stats; + } + + @Override + public boolean canAccessHost(ProviderAdapterContext context, String hostname) { + PrimeraHostset hostset = getHostset(this.hostset); + + List members = hostset.getSetmembers(); + + // check for fqdn and shortname combinations. 
this assumes there is at least a shortname match in both the storage array and CloudStack
+    // hostname configuration
+        String shortname;
+        if (hostname.indexOf('.') > 0) {
+            shortname = hostname.substring(0, (hostname.indexOf('.')));
+        } else {
+            shortname = hostname;
+        }
+        for (String member : members) {
+            // exact match (short or long names)
+            if (member.equals(hostname)) {
+                return true;
+            }
+
+            // Primera has short name and CloudStack had long name
+            if (member.equals(shortname)) {
+                return true;
+            }
+
+            // member has long name but CloudStack had shortname
+            int index = member.indexOf(".");
+            if (index > 0) {
+                if (member.substring(0, (member.indexOf('.'))).equals(shortname)) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    private PrimeraCpg getCpg(String name) {
+        return GET("/cpgs/" + name, new TypeReference<PrimeraCpg>() {
+        });
+    }
+
+    private PrimeraHostset getHostset(String name) {
+        return GET("/hostsets/" + name, new TypeReference<PrimeraHostset>() {
+        });
+    }
+
+    private String getSessionKey() {
+        refreshSession(false);
+        return key;
+    }
+
+    private synchronized void refreshSession(boolean force) {
+        try {
+            if (force || keyExpiration < System.currentTimeMillis()) {
+                // close client to force connection reset on appliance -- not doing this can result in NotAuthorized error...guessing
+                _client.close();
+                _client = null;
+                login();
+                keyExpiration = System.currentTimeMillis() + keyTtl;
+            }
+        } catch (Exception e) {
+            // retry frequently but not every request to avoid DDOS on storage API
+            logger.warn("Failed to refresh Primera API key for " + username + "@" + url + ", will retry in 5 seconds", e);
+            keyExpiration = System.currentTimeMillis() + (5 * 1000);
+        }
+    }
+
+    private void validateLoginInfo(String urlStr) {
+        URL urlFull;
+        try {
+            urlFull = new URL(urlStr);
+        } catch (MalformedURLException e) {
+            throw new RuntimeException("Invalid URL format: " + urlStr, e);
+        }
+
+        int port = urlFull.getPort();
+        if (port <= 0) {
+            port = 443;
+        }
+        this.url = urlFull.getProtocol() + "://" + urlFull.getHost() + ":" + port + urlFull.getPath();
+
+        Map<String, String> queryParms = new HashMap<String, String>();
+        if (urlFull.getQuery() != null) {
+            String[] queryToks = urlFull.getQuery().split("&");
+            for (String tok : queryToks) {
+                if (tok.endsWith("=")) {
+                    continue;
+                }
+                int i = tok.indexOf("=");
+                if (i > 0) {
+                    queryParms.put(tok.substring(0, i), tok.substring(i + 1));
+                }
+            }
+        }
+
+        cpg = connectionDetails.get(PrimeraAdapter.CPG);
+        if (cpg == null) {
+            cpg = queryParms.get(PrimeraAdapter.CPG);
+            if (cpg == null) {
+                throw new RuntimeException(
+                        PrimeraAdapter.CPG + " parameter/option required to configure this storage pool");
+            }
+        }
+
+        snapCpg = connectionDetails.get(PrimeraAdapter.SNAP_CPG);
+        if (snapCpg == null) {
+            snapCpg = queryParms.get(PrimeraAdapter.SNAP_CPG);
+            if (snapCpg == null) {
+                // default to using same CPG as the volume
+                snapCpg = cpg;
+            }
+        }
+
+        hostset = connectionDetails.get(PrimeraAdapter.HOSTSET);
+        if (hostset == null) {
+            hostset = queryParms.get(PrimeraAdapter.HOSTSET);
+            if (hostset == null) {
+                throw new RuntimeException(
+                        PrimeraAdapter.HOSTSET + " parameter/option required to configure this storage pool");
+            }
+        }
+
+        String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS);
+        if (connTimeoutStr == null) {
+            connTimeoutStr = queryParms.get(PrimeraAdapter.CONNECT_TIMEOUT_MS);
+        }
+        if (connTimeoutStr == null) {
+            connTimeout = CONNECT_TIMEOUT_MS_DEFAULT;
+        } else {
+            try {
+                connTimeout = Integer.parseInt(connTimeoutStr);
+            } catch (NumberFormatException e) {
logger.warn("Connection timeout not formatted correctly, using default", e); + connTimeout = CONNECT_TIMEOUT_MS_DEFAULT; + } + } + + String keyTtlString = connectionDetails.get(PrimeraAdapter.KEY_TTL); + if (keyTtlString == null) { + keyTtlString = queryParms.get(PrimeraAdapter.KEY_TTL); + } + if (keyTtlString == null) { + keyTtl = KEY_TTL_DEFAULT; + } else { + try { + keyTtl = Integer.parseInt(keyTtlString); + } catch (NumberFormatException e) { + logger.warn("Key TTL not formatted correctly, using default", e); + keyTtl = KEY_TTL_DEFAULT; + } + } + + String taskWaitTimeoutMsStr = connectionDetails.get(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS); + if (taskWaitTimeoutMsStr == null) { + taskWaitTimeoutMsStr = queryParms.get(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS); + if (taskWaitTimeoutMsStr == null) { + taskWaitTimeoutMs = PrimeraAdapter.TASK_WAIT_TIMEOUT_MS_DEFAULT; + } else { + try { + taskWaitTimeoutMs = Long.parseLong(taskWaitTimeoutMsStr); + } catch (NumberFormatException e) { + logger.warn(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS + " property not set to a proper number, using default value"); + } + } + } + + String skipTlsValidationStr = connectionDetails.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); + if (skipTlsValidationStr == null) { + skipTlsValidationStr = queryParms.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); + } + + if (skipTlsValidationStr != null) { + skipTlsValidation = Boolean.parseBoolean(skipTlsValidationStr); + } else { + skipTlsValidation = true; + } + } + + /** + * Login to the array and get an access token + */ + private void login() { + username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); + password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); + String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); + validateLoginInfo(urlStr); + CloseableHttpResponse response = null; + try { + HttpPost request = new HttpPost(url + "/credentials"); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.setEntity(new StringEntity("{\"user\":\"" + username + "\", \"password\":\"" + password + "\"}")); + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 | statusCode == 201) { + PrimeraKey keyobj = mapper.readValue(response.getEntity().getContent(), PrimeraKey.class); + key = keyobj.getKey(); + } else if (statusCode == 401 || statusCode == 403) { + throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + throw new RuntimeException("Unexpected HTTP response code from Primera [" + url + "] - [" + statusCode + + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (UnsupportedEncodingException e) { + throw new RuntimeException("Error creating input for login, check username/password encoding"); + } catch (UnsupportedOperationException e) { + throw new RuntimeException("Error processing login response from Primera [" + url + "]", e); + } catch (IOException e) { + throw new RuntimeException("Error sending login request to Primera [" + url + "]", e); + } finally { + try { + if (response != null) { + response.close(); + } + } catch (IOException e) { + logger.debug("Error closing response from login attempt to Primera", e); + } + } + } + + private CloseableHttpClient getClient() { + if (_client == null) { + RequestConfig 
+            RequestConfig config = RequestConfig.custom()
+                    .setConnectTimeout((int) connTimeout)
+                    .setConnectionRequestTimeout((int) connTimeout)
+                    .setSocketTimeout((int) connTimeout).build();
+
+            HostnameVerifier verifier = null;
+            SSLContext sslContext = null;
+
+            if (this.skipTlsValidation) {
+                try {
+                    verifier = NoopHostnameVerifier.INSTANCE;
+                    sslContext = new SSLContextBuilder().loadTrustMaterial(null, TrustAllStrategy.INSTANCE).build();
+                } catch (KeyManagementException | NoSuchAlgorithmException | KeyStoreException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+
+            _client = HttpClients.custom()
+                    .setDefaultRequestConfig(config)
+                    .setSSLHostnameVerifier(verifier)
+                    .setSSLContext(sslContext)
+                    .build();
+        }
+        return _client;
+    }
+
+    @SuppressWarnings("unchecked")
+    private <T> T POST(String path, Object input, final TypeReference<T> type) {
+        CloseableHttpResponse response = null;
+        try {
+            this.refreshSession(false);
+            HttpPost request = new HttpPost(url + path);
+            request.addHeader("Content-Type", "application/json");
+            request.addHeader("Accept", "application/json");
+            request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
+            try {
+                String data = mapper.writeValueAsString(input);
+                request.setEntity(new StringEntity(data));
+                logger.debug("POST data: " + request.getEntity());
+            } catch (UnsupportedEncodingException | JsonProcessingException e) {
+                throw new RuntimeException(
+                        "Error processing request payload to [" + url + "] for path [" + path + "]", e);
+            }
+
+            CloseableHttpClient client = getClient();
+            try {
+                response = (CloseableHttpResponse) client.execute(request);
+            } catch (IOException e) {
+                throw new RuntimeException("Error sending request to Primera [" + url + path + "]", e);
+            }
+
+            final int statusCode = response.getStatusLine().getStatusCode();
+            if (statusCode == 200 || statusCode == 201) {
+                try {
+                    if (type != null) {
+                        Header header = response.getFirstHeader("Location");
+                        if (type.getType().getTypeName().equals(String.class.getName())) {
+                            if (header != null) {
+                                return (T) header.getValue();
+                            } else {
+                                return null;
+                            }
+                        } else if (type.getType().getTypeName().equals(PrimeraTaskReference.class.getName())) {
+                            T obj = mapper.readValue(response.getEntity().getContent(), type);
+                            PrimeraTaskReference taskref = (PrimeraTaskReference) obj;
+                            // the Location header may be absent; only propagate it when present
+                            if (header != null) {
+                                taskref.setLocation(header.getValue());
+                            }
+                            return obj;
+                        } else {
+                            return mapper.readValue(response.getEntity().getContent(), type);
+                        }
+                    }
+                    return null;
+                } catch (UnsupportedOperationException | IOException e) {
+                    throw new RuntimeException("Error processing response from Primera [" + url + path + "]", e);
+                }
+            } else if (statusCode == 400) {
+                try {
+                    Map<String, Object> payload = mapper.readValue(response.getEntity().getContent(),
+                            new TypeReference<Map<String, Object>>() {
+                            });
+                    throw new RuntimeException("Invalid request error 400: " + payload);
+                } catch (UnsupportedOperationException | IOException e) {
+                    throw new RuntimeException(
+                            "Error processing bad request response from Primera [" + url + path + "]", e);
+                }
+            } else if (statusCode == 401 || statusCode == 403) {
+                throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username +
+                        "] failed, unable to retrieve session token");
+            } else {
+                try {
+                    Map<String, Object> payload = mapper.readValue(response.getEntity().getContent(),
+                            new TypeReference<Map<String, Object>>() {
+                            });
+                    throw new RuntimeException("Invalid request error " + statusCode + ": " + payload);
+                } catch (UnsupportedOperationException | IOException e) {
+                    throw new RuntimeException("Unexpected HTTP response code from Primera on POST [" + url + path + "] - [" +
+                            statusCode + "] - " + response.getStatusLine().getReasonPhrase());
+                }
+            }
+        } finally {
+            if (response != null) {
+                try {
+                    response.close();
+                } catch (IOException e) {
+                    logger.debug("Unexpected failure closing response to Primera API", e);
+                }
+            }
+        }
+    }
+
+    private <T> T PUT(String path, Object input, final TypeReference<T> type) {
+        CloseableHttpResponse response = null;
+        try {
+            this.refreshSession(false);
+            HttpPut request = new HttpPut(url + path);
+            request.addHeader("Content-Type", "application/json");
+            request.addHeader("Accept", "application/json");
+            request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
+            String data = mapper.writeValueAsString(input);
+            request.setEntity(new StringEntity(data));
+
+            CloseableHttpClient client = getClient();
+            response = (CloseableHttpResponse) client.execute(request);
+
+            final int statusCode = response.getStatusLine().getStatusCode();
+            if (statusCode == 200 || statusCode == 201) {
+                if (type != null) {
+                    return mapper.readValue(response.getEntity().getContent(), type);
+                }
+                return null;
+            } else if (statusCode == 400) {
+                Map<String, Object> payload = mapper.readValue(response.getEntity().getContent(),
+                        new TypeReference<Map<String, Object>>() {
+                        });
+                throw new RuntimeException("Invalid request error 400: " + payload);
+            } else if (statusCode == 401 || statusCode == 403) {
+                throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username +
+                        "] failed, unable to retrieve session token");
+            } else {
+                Map<String, Object> payload = mapper.readValue(response.getEntity().getContent(),
+                        new TypeReference<Map<String, Object>>() {});
+                throw new RuntimeException("Invalid request error from Primera on PUT [" + url + path + "] - " + statusCode + ": " +
+                        response.getStatusLine().getReasonPhrase() + " - " + payload);
+            }
+        } catch (UnsupportedEncodingException | JsonProcessingException e) {
+            throw new RuntimeException(
+                    "Error processing request payload to [" + url + "] for path [" + path + "]", e);
+        } catch (UnsupportedOperationException e) {
+            throw new RuntimeException("Error processing bad request response from Primera [" + url + "]",
+                    e);
+        } catch (IOException e) {
+            throw new RuntimeException("Error sending request to Primera [" + url + "]", e);
+        } finally {
+            if (response != null) {
+                try {
+                    response.close();
+                } catch (IOException e) {
+                    logger.debug("Unexpected failure closing response to Primera API", e);
+                }
+            }
+        }
+    }
+
+    private <T> T GET(String path, final TypeReference<T> type) {
+        CloseableHttpResponse response = null;
+        try {
+            this.refreshSession(false);
+            HttpGet request = new HttpGet(url + path);
+            request.addHeader("Content-Type", "application/json");
+            request.addHeader("Accept", "application/json");
+            request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
+
+            CloseableHttpClient client = getClient();
+            response = (CloseableHttpResponse) client.execute(request);
+            final int statusCode = response.getStatusLine().getStatusCode();
+            if (statusCode == 200) {
+                try {
+                    return mapper.readValue(response.getEntity().getContent(), type);
+                } catch (UnsupportedOperationException | IOException e) {
+                    throw new RuntimeException("Error processing response from Primera [" + url + "]", e);
+                }
+            } else if (statusCode == 401 || statusCode == 403) {
+                throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username +
+                        "] failed, unable to retrieve
session token"); + } else if (statusCode == 404) { + return null; + } else { + throw new RuntimeException("Unexpected HTTP response code from Primera on GET [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (IOException e) { + throw new RuntimeException("Error sending request to Primera [" + url + "]", e); + } catch (UnsupportedOperationException e) { + throw new RuntimeException("Error processing response from Primera [" + url + "]", e); + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to Primera API", e); + } + } + } + } + + private void DELETE(String path) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpDelete request = new HttpDelete(url + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 404 || statusCode == 400) { + // this means the volume was deleted successfully, or doesn't exist (effective delete), or + // the volume name is malformed or too long - meaning it never got created to begin with (effective delete) + return; + } else if (statusCode == 401 || statusCode == 403) { + throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else if (statusCode == 409) { + throw new RuntimeException("The volume cannot be deleted at this time due to existing dependencies. Validate that all snapshots associated with this volume have been deleted and try again." ); + } else { + throw new RuntimeException("Unexpected HTTP response code from Primera on DELETE [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (IOException e) { + throw new RuntimeException("Error sending request to Primera [" + url + "]", e); + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to Primera API", e); + } + } + } + } + + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java new file mode 100644 index 00000000000..81ae442b38d --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.util.Map; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; + +public class PrimeraAdapterFactory implements ProviderAdapterFactory { + + @Override + public String getProviderName() { + return "Primera"; + } + + @Override + public ProviderAdapter create(String url, Map details) { + return new PrimeraAdapter(url, details); + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpg.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpg.java new file mode 100644 index 00000000000..6ac969766c8 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpg.java @@ -0,0 +1,203 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
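The factory above is the single entry point the provider framework needs to construct this adapter. A minimal wiring sketch follows; the endpoint, credentials, and pool options are illustrative assumptions, with cpg and hostset riding along as URL query options for the adapter's validateLoginInfo() to parse:

import java.util.HashMap;
import java.util.Map;

import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
import org.apache.cloudstack.storage.datastore.adapter.primera.PrimeraAdapterFactory;

class FactorySketch {
    static ProviderAdapter connect() {
        // placeholder endpoint and credentials, not real configuration
        String url = "https://primera.example.com:443/api/v1?cpg=SSD_CPG&hostset=cluster1";
        Map<String, String> details = new HashMap<>();
        details.put(ProviderAdapter.API_URL_KEY, url);
        details.put(ProviderAdapter.API_USERNAME_KEY, "cloudstack");
        details.put(ProviderAdapter.API_PASSWORD_KEY, "changeme");
        // create() builds a PrimeraAdapter, which validates the URL options and logs in
        return new PrimeraAdapterFactory().create(url, details);
    }
}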
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.util.ArrayList; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraCpg { + private long ddsRsvdMiB; + private String tdvvVersion; + private PrimeraCpgSAGrowth sAGrowth; + private PrimeraCpgSAUsage sAUsage; + private PrimeraCpgSDGrowth sDGrowth; + private PrimeraCpgSDUsage sDUsage; + private PrimeraCpgUsrUsage usrUsage; + private ArrayList additionalStates; + private boolean dedupCapable; + private ArrayList degradedStates; + private ArrayList failedStates; + private int freeSpaceMiB; + private String name; + private int numFPVVs; + private int numTDVVs; + private int numTPVVs; + private PrimeraCpgPrivateSpaceMiB privateSpaceMiB; + private int rawFreeSpaceMiB; + private int rawSharedSpaceMiB; + private int rawTotalSpaceMiB; + private int sharedSpaceMiB; + private int state; + private int totalSpaceMiB; + private String uuid; + private int id; + public long getDdsRsvdMiB() { + return ddsRsvdMiB; + } + public void setDdsRsvdMiB(long ddsRsvdMiB) { + this.ddsRsvdMiB = ddsRsvdMiB; + } + public String getTdvvVersion() { + return tdvvVersion; + } + public void setTdvvVersion(String tdvvVersion) { + this.tdvvVersion = tdvvVersion; + } + public PrimeraCpgSAGrowth getsAGrowth() { + return sAGrowth; + } + public void setsAGrowth(PrimeraCpgSAGrowth sAGrowth) { + this.sAGrowth = sAGrowth; + } + public PrimeraCpgSAUsage getsAUsage() { + return sAUsage; + } + public void setsAUsage(PrimeraCpgSAUsage sAUsage) { + this.sAUsage = sAUsage; + } + public PrimeraCpgSDGrowth getsDGrowth() { + return sDGrowth; + } + public void setsDGrowth(PrimeraCpgSDGrowth sDGrowth) { + this.sDGrowth = sDGrowth; + } + public PrimeraCpgSDUsage getsDUsage() { + return sDUsage; + } + public void setsDUsage(PrimeraCpgSDUsage sDUsage) { + this.sDUsage = sDUsage; + } + public PrimeraCpgUsrUsage getUsrUsage() { + return usrUsage; + } + public void setUsrUsage(PrimeraCpgUsrUsage usrUsage) { + this.usrUsage = usrUsage; + } + public ArrayList getAdditionalStates() { + return additionalStates; + } + public void setAdditionalStates(ArrayList additionalStates) { + this.additionalStates = additionalStates; + } + public boolean isDedupCapable() { + return dedupCapable; + } + public void setDedupCapable(boolean dedupCapable) { + this.dedupCapable = dedupCapable; + } + public ArrayList getDegradedStates() { + return degradedStates; + } + public void setDegradedStates(ArrayList degradedStates) { + this.degradedStates = degradedStates; + } + public ArrayList getFailedStates() { + return failedStates; + } + public void setFailedStates(ArrayList failedStates) { + this.failedStates = failedStates; + } + public int getFreeSpaceMiB() { + return freeSpaceMiB; + } + public void setFreeSpaceMiB(int freeSpaceMiB) { + this.freeSpaceMiB = freeSpaceMiB; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public int getNumFPVVs() { + return numFPVVs; + } + public void setNumFPVVs(int numFPVVs) { + this.numFPVVs = numFPVVs; + } + public int getNumTDVVs() { + return numTDVVs; + } + public void setNumTDVVs(int numTDVVs) { + this.numTDVVs = numTDVVs; + } + public int getNumTPVVs() { + return numTPVVs; + } + public void setNumTPVVs(int numTPVVs) { + this.numTPVVs = numTPVVs; + } + public PrimeraCpgPrivateSpaceMiB getPrivateSpaceMiB() { + return 
privateSpaceMiB; + } + public void setPrivateSpaceMiB(PrimeraCpgPrivateSpaceMiB privateSpaceMiB) { + this.privateSpaceMiB = privateSpaceMiB; + } + public int getRawFreeSpaceMiB() { + return rawFreeSpaceMiB; + } + public void setRawFreeSpaceMiB(int rawFreeSpaceMiB) { + this.rawFreeSpaceMiB = rawFreeSpaceMiB; + } + public int getRawSharedSpaceMiB() { + return rawSharedSpaceMiB; + } + public void setRawSharedSpaceMiB(int rawSharedSpaceMiB) { + this.rawSharedSpaceMiB = rawSharedSpaceMiB; + } + public int getRawTotalSpaceMiB() { + return rawTotalSpaceMiB; + } + public void setRawTotalSpaceMiB(int rawTotalSpaceMiB) { + this.rawTotalSpaceMiB = rawTotalSpaceMiB; + } + public int getSharedSpaceMiB() { + return sharedSpaceMiB; + } + public void setSharedSpaceMiB(int sharedSpaceMiB) { + this.sharedSpaceMiB = sharedSpaceMiB; + } + public int getState() { + return state; + } + public void setState(int state) { + this.state = state; + } + public int getTotalSpaceMiB() { + return totalSpaceMiB; + } + public void setTotalSpaceMiB(int totalSpaceMiB) { + this.totalSpaceMiB = totalSpaceMiB; + } + public String getUuid() { + return uuid; + } + public void setUuid(String uuid) { + this.uuid = uuid; + } + public int getId() { + return id; + } + public void setId(int id) { + this.id = id; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgDiskPattern.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgDiskPattern.java new file mode 100644 index 00000000000..3bb8d4c1079 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgDiskPattern.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
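A minimal sketch of the arithmetic getManagedStorageStats() (earlier in this patch) applies to the CPG fields above, assuming PrimeraAdapter.BYTES_IN_MiB is the adapter's long MiB-to-bytes constant, as its use in that method implies:

class CpgStatsSketch {
    // capacity comes from the SD growth limit, used space from the raw
    // user-space consumption; the array reports both in MiB
    static Long cpgCapacityInBytes(PrimeraCpg cpgobj) {
        if (cpgobj == null || cpgobj.getsDGrowth() == null) {
            return null;
        }
        return cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB;
    }

    static Long cpgUsedInBytes(PrimeraCpg cpgobj) {
        if (cpgobj == null || cpgobj.getUsrUsage() == null) {
            return null;
        }
        return cpgobj.getUsrUsage().getRawUsedMiB() * PrimeraAdapter.BYTES_IN_MiB;
    }
}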
+package org.apache.cloudstack.storage.datastore.adapter.primera;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class PrimeraCpgDiskPattern {
+    private int diskType;
+
+    public int getDiskType() {
+        return diskType;
+    }
+
+    public void setDiskType(int diskType) {
+        this.diskType = diskType;
+    }
+
+}
diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgLDLayout.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgLDLayout.java
new file mode 100644
index 00000000000..770480f2004
--- /dev/null
+++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgLDLayout.java
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.adapter.primera;
+
+import java.util.ArrayList;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class PrimeraCpgLDLayout {
+    private int rAIDType;
+    private ArrayList<PrimeraCpgDiskPattern> diskPatterns;
+    private int hA;
+    public int getrAIDType() {
+        return rAIDType;
+    }
+    public void setrAIDType(int rAIDType) {
+        this.rAIDType = rAIDType;
+    }
+    public ArrayList<PrimeraCpgDiskPattern> getDiskPatterns() {
+        return diskPatterns;
+    }
+    public void setDiskPatterns(ArrayList<PrimeraCpgDiskPattern> diskPatterns) {
+        this.diskPatterns = diskPatterns;
+    }
+    public int gethA() {
+        return hA;
+    }
+    public void sethA(int hA) {
+        this.hA = hA;
+    }
+
+}
diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgPrivateSpaceMiB.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgPrivateSpaceMiB.java
new file mode 100644
index 00000000000..b38aa10c7f1
--- /dev/null
+++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgPrivateSpaceMiB.java
@@ -0,0 +1,54 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraCpgPrivateSpaceMiB { + private int base; + private int rawBase; + private int rawSnapshot; + private int snapshot; + public int getBase() { + return base; + } + public void setBase(int base) { + this.base = base; + } + public int getRawBase() { + return rawBase; + } + public void setRawBase(int rawBase) { + this.rawBase = rawBase; + } + public int getRawSnapshot() { + return rawSnapshot; + } + public void setRawSnapshot(int rawSnapshot) { + this.rawSnapshot = rawSnapshot; + } + public int getSnapshot() { + return snapshot; + } + public void setSnapshot(int snapshot) { + this.snapshot = snapshot; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSAGrowth.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSAGrowth.java new file mode 100644 index 00000000000..83f67f945ae --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSAGrowth.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
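These space POJOs are plain Jackson targets; @JsonIgnoreProperties(ignoreUnknown = true) is what lets fields added by newer WSAPI firmware pass through harmlessly. A hedged sketch of the deserialization path the adapter's HTTP helpers rely on (the JSON string here is an assumed sample, not real WSAPI output):

import com.fasterxml.jackson.databind.ObjectMapper;

class CpgParseSketch {
    static PrimeraCpg parse(String jsonFromApi) throws java.io.IOException {
        // unknown fields in jsonFromApi are silently ignored
        return new ObjectMapper().readValue(jsonFromApi, PrimeraCpg.class);
    }
}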
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraCpgSAGrowth { + private PrimeraCpgLDLayout lDLayout; + private int incrementMiB; + public PrimeraCpgLDLayout getlDLayout() { + return lDLayout; + } + public void setlDLayout(PrimeraCpgLDLayout lDLayout) { + this.lDLayout = lDLayout; + } + public int getIncrementMiB() { + return incrementMiB; + } + public void setIncrementMiB(int incrementMiB) { + this.incrementMiB = incrementMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSAUsage.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSAUsage.java new file mode 100644 index 00000000000..11b1df668cd --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSAUsage.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraCpgSAUsage { + private int rawTotalMiB; + private int rawUsedMiB; + private int totalMiB; + private int usedMiB; + public int getRawTotalMiB() { + return rawTotalMiB; + } + public void setRawTotalMiB(int rawTotalMiB) { + this.rawTotalMiB = rawTotalMiB; + } + public int getRawUsedMiB() { + return rawUsedMiB; + } + public void setRawUsedMiB(int rawUsedMiB) { + this.rawUsedMiB = rawUsedMiB; + } + public int getTotalMiB() { + return totalMiB; + } + public void setTotalMiB(int totalMiB) { + this.totalMiB = totalMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSDGrowth.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSDGrowth.java new file mode 100644 index 00000000000..fc54e6380b7 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSDGrowth.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraCpgSDGrowth { + private PrimeraCpgLDLayout lDLayout; + private int incrementMiB; + private int limitMiB; + private int warningMiB; + public PrimeraCpgLDLayout getlDLayout() { + return lDLayout; + } + public void setlDLayout(PrimeraCpgLDLayout lDLayout) { + this.lDLayout = lDLayout; + } + public int getIncrementMiB() { + return incrementMiB; + } + public void setIncrementMiB(int incrementMiB) { + this.incrementMiB = incrementMiB; + } + public int getLimitMiB() { + return limitMiB; + } + public void setLimitMiB(int limitMiB) { + this.limitMiB = limitMiB; + } + public int getWarningMiB() { + return warningMiB; + } + public void setWarningMiB(int warningMiB) { + this.warningMiB = warningMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSDUsage.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSDUsage.java new file mode 100644 index 00000000000..5de74fed7bb --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgSDUsage.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraCpgSDUsage { + private int rawTotalMiB; + private int rawUsedMiB; + private int totalMiB; + private int usedMiB; + public int getRawTotalMiB() { + return rawTotalMiB; + } + public void setRawTotalMiB(int rawTotalMiB) { + this.rawTotalMiB = rawTotalMiB; + } + public int getRawUsedMiB() { + return rawUsedMiB; + } + public void setRawUsedMiB(int rawUsedMiB) { + this.rawUsedMiB = rawUsedMiB; + } + public int getTotalMiB() { + return totalMiB; + } + public void setTotalMiB(int totalMiB) { + this.totalMiB = totalMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgUsrUsage.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgUsrUsage.java new file mode 100644 index 00000000000..2cce6c94769 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraCpgUsrUsage.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraCpgUsrUsage { + private int rawTotalMiB; + private int rawUsedMiB; + private int totalMiB; + private int usedMiB; + public int getRawTotalMiB() { + return rawTotalMiB; + } + public void setRawTotalMiB(int rawTotalMiB) { + this.rawTotalMiB = rawTotalMiB; + } + public int getRawUsedMiB() { + return rawUsedMiB; + } + public void setRawUsedMiB(int rawUsedMiB) { + this.rawUsedMiB = rawUsedMiB; + } + public int getTotalMiB() { + return totalMiB; + } + public void setTotalMiB(int totalMiB) { + this.totalMiB = totalMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java new file mode 100644 index 00000000000..e062f0782af --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
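The PrimeraHostset defined next is what canAccessHost() (earlier in this patch) walks when deciding whether a KVM host can reach this pool. A condensed, hedged restatement of its per-member matching rule:

class HostMatchSketch {
    // true when a hostset member and a CloudStack hostname name the same
    // machine, tolerating FQDN/short-name mismatches on either side
    static boolean matchesMember(String member, String hostname) {
        int dot = hostname.indexOf('.');
        String shortname = dot > 0 ? hostname.substring(0, dot) : hostname;
        if (member.equals(hostname) || member.equals(shortname)) {
            return true;
        }
        int memberDot = member.indexOf('.');
        return memberDot > 0 && member.substring(0, memberDot).equals(shortname);
    }
}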
+package org.apache.cloudstack.storage.datastore.adapter.primera;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class PrimeraHostset {
+
+    private String comment;
+    private Integer id;
+    private String name;
+    private List<String> setmembers = new ArrayList<String>();
+    private String uuid;
+    private Map<String, Object> additionalProperties = new LinkedHashMap<String, Object>();
+
+    public String getComment() {
+        return comment;
+    }
+
+    public void setComment(String comment) {
+        this.comment = comment;
+    }
+
+    public Integer getId() {
+        return id;
+    }
+
+    public void setId(Integer id) {
+        this.id = id;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public List<String> getSetmembers() {
+        return setmembers;
+    }
+
+    public void setSetmembers(List<String> setmembers) {
+        this.setmembers = setmembers;
+    }
+
+    public String getUuid() {
+        return uuid;
+    }
+
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }
+
+    public Map<String, Object> getAdditionalProperties() {
+        return additionalProperties;
+    }
+
+    public void setAdditionalProperties(Map<String, Object> additionalProperties) {
+        this.additionalProperties = additionalProperties;
+    }
+
+    // request body used to export (map) a volume to a host or hostset as a VLUN
+    public static class PrimeraHostsetVLUNRequest {
+        private String volumeName;
+        private Boolean autoLun = true;
+        private Integer lun = 0;
+        private Integer maxAutoLun = 0;
+        /**
+         * This can be a single hostname OR a set of hosts in the format
+         * "set:<setname>".
+         * For the purposes of this driver, it's expected that the predominant
+         * use case is a hostset aligned with a CloudStack cluster.
+         */
+        private String hostname;
+
+        public String getVolumeName() {
+            return volumeName;
+        }
+
+        public void setVolumeName(String volumeName) {
+            this.volumeName = volumeName;
+        }
+
+        public Boolean getAutoLun() {
+            return autoLun;
+        }
+
+        public void setAutoLun(Boolean autoLun) {
+            this.autoLun = autoLun;
+        }
+
+        public Integer getLun() {
+            return lun;
+        }
+
+        public void setLun(Integer lun) {
+            this.lun = lun;
+        }
+
+        public Integer getMaxAutoLun() {
+            return maxAutoLun;
+        }
+
+        public void setMaxAutoLun(Integer maxAutoLun) {
+            this.maxAutoLun = maxAutoLun;
+        }
+
+        public String getHostname() {
+            return hostname;
+        }
+
+        public void setHostname(String hostname) {
+            this.hostname = hostname;
+        }
+
+    }
+}
diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraKey.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraKey.java
new file mode 100644
index 00000000000..0fc050e9844
--- /dev/null
+++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraKey.java
@@ -0,0 +1,35 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraKey { + private String key; + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraTaskReference.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraTaskReference.java new file mode 100644 index 00000000000..0a312038e9a --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraTaskReference.java @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraTaskReference { + private String taskid; + /** + * not really returned, but if there is a Location header in a + * response we'll add it automatically if this is the type + **/ + private String location; + public String getTaskid() { + return taskid; + } + public void setTaskid(String taskid) { + this.taskid = taskid; + } + public String getLocation() { + return location; + } + public void setLocation(String location) { + this.location = location; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraTaskStatus.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraTaskStatus.java new file mode 100644 index 00000000000..293efb149d4 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraTaskStatus.java @@ -0,0 +1,174 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.adapter.primera;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class PrimeraTaskStatus {
+    private Integer id;
+    private Integer type;
+    private String name;
+    private Integer status;
+    private Integer completedPhases;
+    private Integer totalPhases;
+    private Integer completedSteps;
+    private Integer totalSteps;
+    private String startTime;
+    private String finishTime;
+    private Integer priority;
+    private String user;
+    private String detailedStatus;
+    public static final Integer STATUS_DONE = 1;
+    public static final Integer STATUS_ACTIVE = 2;
+    public static final Integer STATUS_CANCELLED = 3;
+    public static final Integer STATUS_FAILED = 4;
+
+    public boolean isFinished() {
+        // compare Integer values with equals() rather than reference equality
+        return !STATUS_ACTIVE.equals(status);
+    }
+
+    public boolean isSuccess() {
+        return STATUS_DONE.equals(status);
+    }
+
+    public String getStatusName() {
+        if (STATUS_DONE.equals(status)) {
+            return "DONE";
+        } else if (STATUS_ACTIVE.equals(status)) {
+            return "ACTIVE";
+        } else if (STATUS_CANCELLED.equals(status)) {
+            return "CANCELLED";
+        } else if (STATUS_FAILED.equals(status)) {
+            return "FAILED";
+        } else {
+            return "UNKNOWN";
+        }
+    }
+
+    public Integer getId() {
+        return id;
+    }
+
+    public void setId(Integer id) {
+        this.id = id;
+    }
+
+    public Integer getType() {
+        return type;
+    }
+
+    public void setType(Integer type) {
+        this.type = type;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public Integer getStatus() {
+        return status;
+    }
+
+    public void setStatus(Integer status) {
+        this.status = status;
+    }
+
+    public Integer getCompletedPhases() {
+        return completedPhases;
+    }
+
+    public void setCompletedPhases(Integer completedPhases) {
+        this.completedPhases = completedPhases;
+    }
+
+    public Integer getTotalPhases() {
+        return totalPhases;
+    }
+
+    public void setTotalPhases(Integer totalPhases) {
+        this.totalPhases = totalPhases;
+    }
+
+    public Integer getCompletedSteps() {
+        return completedSteps;
+    }
+
+    public void setCompletedSteps(Integer completedSteps) {
+        this.completedSteps = completedSteps;
+    }
+
+    public Integer getTotalSteps() {
+        return totalSteps;
+    }
+
+    public void setTotalSteps(Integer totalSteps) {
+        this.totalSteps = totalSteps;
+    }
+
+    public String getStartTime() {
+        return startTime;
+    }
+
+    public void setStartTime(String startTime) {
+        this.startTime = startTime;
+    }
+
+    public String getFinishTime() {
+        return finishTime;
+    }
+
+    public void setFinishTime(String finishTime) {
+        this.finishTime =
finishTime; + } + + public Integer getPriority() { + return priority; + } + + public void setPriority(Integer priority) { + this.priority = priority; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getDetailedStatus() { + return detailedStatus; + } + + public void setDetailedStatus(String detailedStatus) { + this.detailedStatus = detailedStatus; + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlun.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlun.java new file mode 100644 index 00000000000..d35b16c048b --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlun.java @@ -0,0 +1,180 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVlun { + private int lun; + private String volumeName; + private String hostname; + private String remoteName; + private int type; + private String serial; + private PrimeraPortPosition portPos; + private String volumeWWN; + private int multipathing; + private int failedPathPol; + private int failedPathInterval; + private String hostDeviceName; + @JsonProperty("Subsystem_NQN") + private String subsystemNQN; + private boolean active; + + public static class PrimeraPortPosition { + private int node; + private int slot; + private int cardPort; + public int getNode() { + return node; + } + public void setNode(int node) { + this.node = node; + } + public int getSlot() { + return slot; + } + public void setSlot(int slot) { + this.slot = slot; + } + public int getCardPort() { + return cardPort; + } + public void setCardPort(int cardPort) { + this.cardPort = cardPort; + } + + } + + public int getLun() { + return lun; + } + + public void setLun(int lun) { + this.lun = lun; + } + + public String getVolumeName() { + return volumeName; + } + + public void setVolumeName(String volumeName) { + this.volumeName = volumeName; + } + + public String getHostname() { + return hostname; + } + + public void setHostname(String hostname) { + this.hostname = hostname; + } + + public String getRemoteName() { + return remoteName; + } + + public void setRemoteName(String remoteName) { + this.remoteName = remoteName; + } + + public int getType() { + return type; + } + + public void setType(int 
type) { + this.type = type; + } + + public String getSerial() { + return serial; + } + + public void setSerial(String serial) { + this.serial = serial; + } + + public PrimeraPortPosition getPortPos() { + return portPos; + } + + public void setPortPos(PrimeraPortPosition portPos) { + this.portPos = portPos; + } + + public String getVolumeWWN() { + return volumeWWN; + } + + public void setVolumeWWN(String volumeWWN) { + this.volumeWWN = volumeWWN; + } + + public int getMultipathing() { + return multipathing; + } + + public void setMultipathing(int multipathing) { + this.multipathing = multipathing; + } + + public int getFailedPathPol() { + return failedPathPol; + } + + public void setFailedPathPol(int failedPathPol) { + this.failedPathPol = failedPathPol; + } + + public int getFailedPathInterval() { + return failedPathInterval; + } + + public void setFailedPathInterval(int failedPathInterval) { + this.failedPathInterval = failedPathInterval; + } + + public String getHostDeviceName() { + return hostDeviceName; + } + + public void setHostDeviceName(String hostDeviceName) { + this.hostDeviceName = hostDeviceName; + } + + public String getSubsystemNQN() { + return subsystemNQN; + } + + public void setSubsystemNQN(String subsystemNQN) { + this.subsystemNQN = subsystemNQN; + } + + public boolean isActive() { + return active; + } + + public void setActive(boolean active) { + this.active = active; + } + + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlunList.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlunList.java new file mode 100644 index 00000000000..d50fdfef4aa --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlunList.java @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
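A VLUN records one export of a volume to a host or hostset; the PrimeraVlunList wrapper defined next carries the members array the WSAPI returns. A sketch of scanning such a list for the active export of a given volume (the helper name is illustrative):

class VlunScanSketch {
    static PrimeraVlun findActiveVlun(PrimeraVlunList list, String volumeName) {
        if (list == null || list.getMembers() == null) {
            return null;
        }
        for (PrimeraVlun vlun : list.getMembers()) {
            // isActive() marks the live export path
            if (vlun.isActive() && volumeName.equals(vlun.getVolumeName())) {
                return vlun;
            }
        }
        return null;
    }
}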
+package org.apache.cloudstack.storage.datastore.adapter.primera;
+
+import java.util.List;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class PrimeraVlunList {
+    private int total;
+    private String serial;
+    private List<PrimeraVlun> members;
+    public int getTotal() {
+        return total;
+    }
+    public void setTotal(int total) {
+        this.total = total;
+    }
+    public String getSerial() {
+        return serial;
+    }
+    public void setSerial(String serial) {
+        this.serial = serial;
+    }
+    public List<PrimeraVlun> getMembers() {
+        return members;
+    }
+    public void setMembers(List<PrimeraVlun> members) {
+        this.members = members;
+    }
+
+}
diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolume.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolume.java
new file mode 100644
index 00000000000..9ae58e42c17
--- /dev/null
+++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolume.java
@@ -0,0 +1,420 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.util.ArrayList; +import java.util.Date; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolume implements ProviderSnapshot { + @JsonIgnore + private AddressType addressType = AddressType.FIBERWWN; + @JsonIgnore + private String connectionId; + @JsonIgnore + private Integer priority = 0; + + private String physParentId = null; + private Integer parentId = null; + private String copyOf = null; + private Integer roChildId = null; + private Integer rwChildId = null; + private String snapCPG = null; + private Long total = null; + /** + * Actions are enumerated and listed at + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v25706371.html + */ + private Integer action = null; + private String comment = null; + private Integer id = null; + private String name = null; + private Integer deduplicationState = null; + private Integer compressionState = null; + private Integer provisioningType = null; + private Integer copyType = null; + private Integer baseId = null; + private Boolean readOnly = null; + private Integer state = null; + private ArrayList<Integer> failedStates = null; + private ArrayList<Integer> degradedStates = null; + private ArrayList<Integer> additionalStates = null; + private PrimeraVolumeAdminSpace adminSpace = null; + private PrimeraVolumeSnapshotSpace snapshotSpace = null; + private PrimeraVolumeUserSpace userSpace = null; + private Integer totalReservedMiB = null; + private Integer totalUsedMiB = null; + private Integer sizeMiB = null; + private Integer hostWriteMiB = null; + private String wwn = null; + private Integer creationTimeSec = null; + private Date creationTime8601 = null; + private Integer ssSpcAllocWarningPct; + private Integer ssSpcAllocLimitPct = null; + private Integer usrSpcAllocWarningPct = null; + private Integer usrSpcAllocLimitPct = null; + private PrimeraVolumePolicies policies = null; + private String userCPG = null; + private String uuid = null; + private Integer sharedParentId = null; + private Integer udid = null; + private PrimeraVolumeCapacityEfficiency capacityEfficiency = null; + private Integer rcopyStatus = null; + private ArrayList<PrimeraVolumeLink> links = null; + public String getPhysParentId() { + return physParentId; + } + public void setPhysParentId(String physParentId) { + this.physParentId = physParentId; + } + public Integer getParentId() { + return parentId; + } + public void setParentId(Integer parentId) { + this.parentId = parentId; + } + public String getCopyOf() { + return copyOf; + } + public void setCopyOf(String copyOf) { + this.copyOf = copyOf; + } + public Integer getRoChildId() { + return roChildId; + } + public void setRoChildId(Integer roChildId) { + this.roChildId = roChildId; + } + public Integer getRwChildId() { + return rwChildId; + } + public void setRwChildId(Integer rwChildId) { + this.rwChildId = rwChildId; + } + public String getSnapCPG() { + return snapCPG; + } + public void setSnapCPG(String snapCPG) { + this.snapCPG = snapCPG; + } + public Long getTotal() { + return total; + } + public void setTotal(Long total) { + this.total = total; + } + public Integer getAction() { + return action; + } + public void setAction(Integer action) { + this.action = action; + } + public String getComment() { + return comment; + } + public void setComment(String comment) { + this.comment = comment; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public Integer getDeduplicationState() { + return deduplicationState; + } + public void setDeduplicationState(Integer deduplicationState) { + this.deduplicationState = deduplicationState; + } + public Integer getCompressionState() { + return compressionState; + } + public void setCompressionState(Integer compressionState) { + this.compressionState = compressionState; + } + public Integer getProvisioningType() { + return provisioningType; + } + public void setProvisioningType(Integer provisioningType) { + this.provisioningType = provisioningType; + } + public Integer getCopyType() { + return copyType; + } + public void setCopyType(Integer copyType) { + this.copyType = copyType; + } + public Integer getBaseId() { + return baseId; + } + public void setBaseId(Integer baseId) { + this.baseId = baseId; + } + public Boolean getReadOnly() { + return readOnly; + } + public void setReadOnly(Boolean readOnly) { + this.readOnly = readOnly; + } + public String getState() { + if (state != null) { + return state.toString(); + } + return null; + } + public void setState(Integer state) { + this.state = state; + } + public ArrayList<Integer> getFailedStates() { + return failedStates; + } + public void setFailedStates(ArrayList<Integer> failedStates) { + this.failedStates = failedStates; + } + public ArrayList<Integer> getDegradedStates() { + return degradedStates; + } + public void setDegradedStates(ArrayList<Integer> degradedStates) { + this.degradedStates = degradedStates; + } + public ArrayList<Integer> getAdditionalStates() { + return additionalStates; + } + public void setAdditionalStates(ArrayList<Integer> additionalStates) { + this.additionalStates = additionalStates; + } + public PrimeraVolumeAdminSpace getAdminSpace() { + return adminSpace; + } + public void setAdminSpace(PrimeraVolumeAdminSpace adminSpace) { + this.adminSpace = adminSpace; + } + public PrimeraVolumeSnapshotSpace getSnapshotSpace() { + return snapshotSpace; + } + public void setSnapshotSpace(PrimeraVolumeSnapshotSpace snapshotSpace) { + this.snapshotSpace = snapshotSpace; + } + public PrimeraVolumeUserSpace getUserSpace() { + return userSpace; + } + public void setUserSpace(PrimeraVolumeUserSpace userSpace) { + this.userSpace = userSpace; + } + public Integer getTotalReservedMiB() { + return totalReservedMiB; + } + public void setTotalReservedMiB(Integer totalReservedMiB) { + this.totalReservedMiB = totalReservedMiB; + } + public Integer getTotalUsedMiB() { + return totalUsedMiB; + } + public void setTotalUsedMiB(Integer totalUsedMiB) { + this.totalUsedMiB = totalUsedMiB; + } + public Integer getSizeMiB() { + return sizeMiB; + } + public void setSizeMiB(Integer sizeMiB) { + this.sizeMiB = sizeMiB; + } + public Integer getHostWriteMiB() { + return hostWriteMiB; + } + public void setHostWriteMiB(Integer hostWriteMiB) { + this.hostWriteMiB = hostWriteMiB; + } + public String getWwn() { + return wwn; + } + public void setWwn(String wwn) { + this.wwn = wwn; + } + public Integer getCreationTimeSec() { + return creationTimeSec; + } + public void setCreationTimeSec(Integer creationTimeSec) { + this.creationTimeSec = creationTimeSec; + } + public Date getCreationTime8601() { + return creationTime8601; + } + public void setCreationTime8601(Date creationTime8601) { + this.creationTime8601 = creationTime8601; + } + public Integer getSsSpcAllocWarningPct() { + return ssSpcAllocWarningPct; + } + public void setSsSpcAllocWarningPct(Integer ssSpcAllocWarningPct) { + this.ssSpcAllocWarningPct = ssSpcAllocWarningPct; + } + public Integer getSsSpcAllocLimitPct() { + return ssSpcAllocLimitPct; + } + public void setSsSpcAllocLimitPct(Integer ssSpcAllocLimitPct) { + this.ssSpcAllocLimitPct = ssSpcAllocLimitPct; + } + public Integer getUsrSpcAllocWarningPct() { + return usrSpcAllocWarningPct; + } + public void setUsrSpcAllocWarningPct(Integer usrSpcAllocWarningPct) { + this.usrSpcAllocWarningPct = usrSpcAllocWarningPct; + } + public Integer getUsrSpcAllocLimitPct() { + return usrSpcAllocLimitPct; + } + public void setUsrSpcAllocLimitPct(Integer usrSpcAllocLimitPct) { + this.usrSpcAllocLimitPct = usrSpcAllocLimitPct; + } + public PrimeraVolumePolicies getPolicies() { + return policies; + } + public void setPolicies(PrimeraVolumePolicies policies) { + this.policies = policies; + } + public String getUserCPG() { + return userCPG; + } + public void setUserCPG(String userCPG) { + this.userCPG = userCPG; + } + public String getUuid() { + return uuid; + } + public void setUuid(String uuid) { + this.uuid = uuid; + } + public Integer getSharedParentId() { + return sharedParentId; + } + public void setSharedParentId(Integer sharedParentId) { + this.sharedParentId = sharedParentId; + } + public Integer getUdid() { + return udid; + } + public void setUdid(Integer udid) { + this.udid = udid; + } + public PrimeraVolumeCapacityEfficiency getCapacityEfficiency() { + return capacityEfficiency; + } + public void setCapacityEfficiency(PrimeraVolumeCapacityEfficiency capacityEfficiency) { + this.capacityEfficiency = capacityEfficiency; + } + public Integer getRcopyStatus() { + return rcopyStatus; + } + public void setRcopyStatus(Integer rcopyStatus) { + this.rcopyStatus = rcopyStatus; + } + public ArrayList<PrimeraVolumeLink> getLinks() { + return links; + } + public void setLinks(ArrayList<PrimeraVolumeLink> links) { + this.links = links; + } + @Override + @JsonIgnore + public Boolean isDestroyed() { + return false; + } + @Override + public void setId(String id) { + this.id = Integer.parseInt(id); + } + public String getId() { + if (id != null) { + return Integer.toString(id); + } + return null; + } + @Override + public Integer getPriority() { + return priority; + } + @Override + public void setPriority(Integer priority) { + this.priority = priority; + } + @Override + public AddressType getAddressType() { + return addressType; + } + @Override + public void setAddressType(AddressType addressType) { + this.addressType = addressType; + } + @Override + public String getAddress() { + return this.wwn; + } + @Override + @JsonIgnore + public Long getAllocatedSizeInBytes() { + if (this.getSizeMiB() != null) { + return this.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB; + } + return 0L; + } + @Override + @JsonIgnore + public Long getUsedBytes() { + if (this.getTotalReservedMiB() != null) { + return this.getTotalReservedMiB() * PrimeraAdapter.BYTES_IN_MiB; + } + return 0L; + } + @Override + @JsonIgnore + public String getExternalUuid() { + return uuid; + } + public void setExternalUuid(String uuid) { + this.uuid = uuid; + } + @Override + @JsonIgnore + public String getExternalName() { + return name; + } + public void setExternalName(String name) { + this.name = name; + } + @Override + @JsonIgnore + public String getExternalConnectionId() { + return connectionId; + } + public void setExternalConnection(String connectionId) { + this.connectionId = connectionId; + } + @Override + @JsonIgnore +
public Boolean canAttachDirectly() { + return true; + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeAdminSpace.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeAdminSpace.java new file mode 100644 index 00000000000..63ddf09d20e --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeAdminSpace.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeAdminSpace { + private int reservedMiB; + private int rawReservedMiB; + private int usedMiB; + private int freeMiB; + public int getReservedMiB() { + return reservedMiB; + } + public void setReservedMiB(int reservedMiB) { + this.reservedMiB = reservedMiB; + } + public int getRawReservedMiB() { + return rawReservedMiB; + } + public void setRawReservedMiB(int rawReservedMiB) { + this.rawReservedMiB = rawReservedMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + public int getFreeMiB() { + return freeMiB; + } + public void setFreeMiB(int freeMiB) { + this.freeMiB = freeMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCapacityEfficiency.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCapacityEfficiency.java new file mode 100644 index 00000000000..b058902d318 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCapacityEfficiency.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeCapacityEfficiency { + private double compaction; + private double deduplication; + public double getCompaction() { + return compaction; + } + public void setCompaction(double compaction) { + this.compaction = compaction; + } + public double getDeduplication() { + return deduplication; + } + public void setDeduplication(double deduplication) { + this.deduplication = deduplication; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequest.java new file mode 100644 index 00000000000..779064f6e9b --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequest.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v24885490.html + */ +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeCopyRequest { + private String action = "createPhysicalCopy"; + private PrimeraVolumeCopyRequestParameters parameters; + public String getAction() { + return action; + } + public void setAction(String action) { + this.action = action; + } + public PrimeraVolumeCopyRequestParameters getParameters() { + return parameters; + } + public void setParameters(PrimeraVolumeCopyRequestParameters parameters) { + this.parameters = parameters; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java new file mode 100644 index 00000000000..33ad0d445f8 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
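+// Illustrative sketch, not part of the patch: with the NON_NULL setting above,
+// only fields that are explicitly set are serialized. A physical-copy request
+// built against the parameters class below (destination volume name assumed for
+// illustration) would emit roughly:
+//
+//   {"action":"createPhysicalCopy","parameters":{"destVolume":"vol-copy","online":true}}
+//
+// per the HPE WSAPI document linked from PrimeraVolumeCopyRequest.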
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v24885490.html + */ + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeCopyRequestParameters { + private String destVolume = null; + private String destCPG = null; + private Boolean online = false; + private String wwn = null; + private Boolean tpvv = null; + private Boolean reduce = null; + private String snapCPG = null; + private Boolean skipZero = null; + private Boolean saveSnapshot = null; + /** 1=HIGH, 2=MED, 3=LOW */ + private Integer priority = null; + public String getDestVolume() { + return destVolume; + } + public void setDestVolume(String destVolume) { + this.destVolume = destVolume; + } + public String getDestCPG() { + return destCPG; + } + public void setDestCPG(String destCPG) { + this.destCPG = destCPG; + } + public Boolean getOnline() { + return online; + } + public void setOnline(Boolean online) { + this.online = online; + } + public String getWwn() { + return wwn; + } + public void setWwn(String wwn) { + this.wwn = wwn; + } + public Boolean getTpvv() { + return tpvv; + } + public void setTpvv(Boolean tpvv) { + this.tpvv = tpvv; + } + public Boolean getReduce() { + return reduce; + } + public void setReduce(Boolean reduce) { + this.reduce = reduce; + } + public String getSnapCPG() { + return snapCPG; + } + public void setSnapCPG(String snapCPG) { + this.snapCPG = snapCPG; + } + public Boolean getSkipZero() { + return skipZero; + } + public void setSkipZero(Boolean skipZero) { + this.skipZero = skipZero; + } + public Boolean getSaveSnapshot() { + return saveSnapshot; + } + public void setSaveSnapshot(Boolean saveSnapshot) { + this.saveSnapshot = saveSnapshot; + } + public Integer getPriority() { + return priority; + } + public void setPriority(Integer priority) { + this.priority = priority; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLink.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLink.java new file mode 100644 index 00000000000..27b19bcc9e8 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLink.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeLink { + private String href; + private String rel; + public String getHref() { + return href; + } + public void setHref(String href) { + this.href = href; + } + public String getRel() { + return rel; + } + public void setRel(String rel) { + this.rel = rel; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLinkList.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLinkList.java new file mode 100644 index 00000000000..01bec70acc3 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLinkList.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeLinkList { + private List<PrimeraVolumeLink> list; + + public List<PrimeraVolumeLink> getList() { + return list; + } + + public void setList(List<PrimeraVolumeLink> list) { + this.list = list; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePolicies.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePolicies.java new file mode 100644 index 00000000000..2baa9a2ddca --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePolicies.java @@ -0,0 +1,82 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied.
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumePolicies { + private Boolean tpZeroFill; + private Boolean staleSS; + private Boolean oneHost; + private Boolean zeroDetect; + private Boolean system; + private Boolean caching; + private Boolean fsvc; + private Integer hostDIF; + public Boolean getTpZeroFill() { + return tpZeroFill; + } + public void setTpZeroFill(Boolean tpZeroFill) { + this.tpZeroFill = tpZeroFill; + } + public Boolean getStaleSS() { + return staleSS; + } + public void setStaleSS(Boolean staleSS) { + this.staleSS = staleSS; + } + public Boolean getOneHost() { + return oneHost; + } + public void setOneHost(Boolean oneHost) { + this.oneHost = oneHost; + } + public Boolean getZeroDetect() { + return zeroDetect; + } + public void setZeroDetect(Boolean zeroDetect) { + this.zeroDetect = zeroDetect; + } + public Boolean getSystem() { + return system; + } + public void setSystem(Boolean system) { + this.system = system; + } + public Boolean getCaching() { + return caching; + } + public void setCaching(Boolean caching) { + this.caching = caching; + } + public Boolean getFsvc() { + return fsvc; + } + public void setFsvc(Boolean fsvc) { + this.fsvc = fsvc; + } + public Integer getHostDIF() { + return hostDIF; + } + public void setHostDIF(Integer hostDIF) { + this.hostDIF = hostDIF; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java new file mode 100644 index 00000000000..48898c27277 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumePromoteRequest { + /** + * Defines action for the request as described at https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html + */ + private Integer action = 4; + private Boolean online = true; + private Integer priority = 2; // MEDIUM + private Boolean allowRemoteCopyParent = true; + public Integer getAction() { + return action; + } + public void setAction(Integer action) { + this.action = action; + } + public Boolean getOnline() { + return online; + } + public void setOnline(Boolean online) { + this.online = online; + } + public Integer getPriority() { + return priority; + } + public void setPriority(Integer priority) { + this.priority = priority; + } + public Boolean getAllowRemoteCopyParent() { + return allowRemoteCopyParent; + } + public void setAllowRemoteCopyParent(Boolean allowRemoteCopyParent) { + this.allowRemoteCopyParent = allowRemoteCopyParent; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRequest.java new file mode 100644 index 00000000000..b8f67fbb562 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRequest.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
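+// Note, editor's illustration rather than patch content: action 4 is
+// PROMOTE_VIRTUAL_COPY (the same code PrimeraVolumeRevertSnapshotRequest uses
+// below) and priority 2 is MED in the 1=HIGH, 2=MED, 3=LOW convention noted in
+// PrimeraVolumeCopyRequestParameters, so the defaults above serialize as:
+//
+//   {"action":4,"online":true,"priority":2,"allowRemoteCopyParent":true}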
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeRequest { + private String name; + private String cpg; + private long sizeMiB; + private String comment; + private String snapCPG = null; + private Boolean reduce; + private Boolean tpvv; + private Integer ssSpcAllocLimitPct; + private Integer ssSpcAllocWarningPct; + private Integer usrSpcAllocWarningPct; + private Integer usrSpcAllocLimitPct; + private PrimeraVolumePolicies policies; + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getCpg() { + return cpg; + } + public void setCpg(String cpg) { + this.cpg = cpg; + } + public long getSizeMiB() { + return sizeMiB; + } + public void setSizeMiB(long sizeMiB) { + this.sizeMiB = sizeMiB; + } + public String getComment() { + return comment; + } + public void setComment(String comment) { + this.comment = comment; + } + public String getSnapCPG() { + return snapCPG; + } + public void setSnapCPG(String snapCPG) { + this.snapCPG = snapCPG; + } + public Boolean getReduce() { + return reduce; + } + public void setReduce(Boolean reduce) { + this.reduce = reduce; + } + public Boolean getTpvv() { + return tpvv; + } + public void setTpvv(Boolean tpvv) { + this.tpvv = tpvv; + } + public Integer getSsSpcAllocLimitPct() { + return ssSpcAllocLimitPct; + } + public void setSsSpcAllocLimitPct(Integer ssSpcAllocLimitPct) { + this.ssSpcAllocLimitPct = ssSpcAllocLimitPct; + } + public Integer getSsSpcAllocWarningPct() { + return ssSpcAllocWarningPct; + } + public void setSsSpcAllocWarningPct(Integer ssSpcAllocWarningPct) { + this.ssSpcAllocWarningPct = ssSpcAllocWarningPct; + } + public Integer getUsrSpcAllocWarningPct() { + return usrSpcAllocWarningPct; + } + public void setUsrSpcAllocWarningPct(Integer usrSpcAllocWarningPct) { + this.usrSpcAllocWarningPct = usrSpcAllocWarningPct; + } + public Integer getUsrSpcAllocLimitPct() { + return usrSpcAllocLimitPct; + } + public void setUsrSpcAllocLimitPct(Integer usrSpcAllocLimitPct) { + this.usrSpcAllocLimitPct = usrSpcAllocLimitPct; + } + public PrimeraVolumePolicies getPolicies() { + return policies; + } + public void setPolicies(PrimeraVolumePolicies policies) { + this.policies = policies; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRevertSnapshotRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRevertSnapshotRequest.java new file mode 100644 index 00000000000..fcdd7a81b6b --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRevertSnapshotRequest.java @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html + */ +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeRevertSnapshotRequest { + private int action = 4; //PROMOTE_VIRTUAL_COPY + private Boolean online = true; + private Integer priority = 2; + public int getAction() { + return action; + } + public void setAction(int action) { + this.action = action; + } + public Boolean getOnline() { + return online; + } + public void setOnline(Boolean online) { + this.online = online; + } + public Integer getPriority() { + return priority; + } + public void setPriority(Integer priority) { + this.priority = priority; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequest.java new file mode 100644 index 00000000000..6fb0f195e8b --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequest.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html + */ +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeSnapshotRequest { + private String action = "createSnapshot"; + private PrimeraVolumeSnapshotRequestParameters parameters; + public String getAction() { + return action; + } + public void setAction(String action) { + this.action = action; + } + public PrimeraVolumeSnapshotRequestParameters getParameters() { + return parameters; + } + public void setParameters(PrimeraVolumeSnapshotRequestParameters parameters) { + this.parameters = parameters; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequestParameters.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequestParameters.java new file mode 100644 index 00000000000..de2fe24d638 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequestParameters.java @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
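+// Illustrative sketch, not part of the patch: a snapshot request pairs the
+// wrapper above with the parameters class below. For a read-only snapshot
+// named "snap-1" (name assumed for illustration), the serialized body would be
+// roughly:
+//
+//   {"action":"createSnapshot","parameters":{"name":"snap-1","readOnly":true,"syncSnapRcopy":false}}
+//
+// with all unset (null) fields omitted by the NON_NULL setting.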
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html + */ +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeSnapshotRequestParameters { + private String name = null; + private String id = null; + private String comment = null; + private Boolean readOnly = false; + private Integer expirationHours = null; + private Integer retentionHours = null; + private String addToSet = null; + private Boolean syncSnapRcopy = false; + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getId() { + return id; + } + public void setId(String id) { + this.id = id; + } + public String getComment() { + return comment; + } + public void setComment(String comment) { + this.comment = comment; + } + public Boolean getReadOnly() { + return readOnly; + } + public void setReadOnly(Boolean readOnly) { + this.readOnly = readOnly; + } + public Integer getExpirationHours() { + return expirationHours; + } + public void setExpirationHours(Integer expirationHours) { + this.expirationHours = expirationHours; + } + public Integer getRetentionHours() { + return retentionHours; + } + public void setRetentionHours(Integer retentionHours) { + this.retentionHours = retentionHours; + } + public String getAddToSet() { + return addToSet; + } + public void setAddToSet(String addToSet) { + this.addToSet = addToSet; + } + public Boolean getSyncSnapRcopy() { + return syncSnapRcopy; + } + public void setSyncSnapRcopy(Boolean syncSnapRcopy) { + this.syncSnapRcopy = syncSnapRcopy; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotSpace.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotSpace.java new file mode 100644 index 00000000000..2cb0d53844a --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotSpace.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeSnapshotSpace { + private int reservedMiB; + private int rawReservedMiB; + private int usedMiB; + private int freeMiB; + public int getReservedMiB() { + return reservedMiB; + } + public void setReservedMiB(int reservedMiB) { + this.reservedMiB = reservedMiB; + } + public int getRawReservedMiB() { + return rawReservedMiB; + } + public void setRawReservedMiB(int rawReservedMiB) { + this.rawReservedMiB = rawReservedMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + public int getFreeMiB() { + return freeMiB; + } + public void setFreeMiB(int freeMiB) { + this.freeMiB = freeMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUpdateRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUpdateRequest.java new file mode 100644 index 00000000000..07b3425d126 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUpdateRequest.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeUpdateRequest { + private String comment; + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUserSpace.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUserSpace.java new file mode 100644 index 00000000000..e4cea1781f3 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUserSpace.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeUserSpace { + private int reservedMiB; + private int rawReservedMiB; + private int usedMiB; + private int freeMiB; + public int getReservedMiB() { + return reservedMiB; + } + public void setReservedMiB(int reservedMiB) { + this.reservedMiB = reservedMiB; + } + public int getRawReservedMiB() { + return rawReservedMiB; + } + public void setRawReservedMiB(int rawReservedMiB) { + this.rawReservedMiB = rawReservedMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + public int getFreeMiB() { + return freeMiB; + } + public void setFreeMiB(int freeMiB) { + this.freeMiB = freeMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/provider/PrimeraPrimaryDatastoreProviderImpl.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/provider/PrimeraPrimaryDatastoreProviderImpl.java new file mode 100644 index 00000000000..e5ce327c5b1 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/provider/PrimeraPrimaryDatastoreProviderImpl.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
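+// Illustrative sketch, not part of the patch: getName() below is the provider
+// string an operator selects when registering primary storage, e.g. through
+// the standard createStoragePool API (all values other than the provider name
+// are placeholders):
+//
+//   create storagepool zoneid=<zone> name=primera-pool provider=Primera scope=zone hypervisor=KVM url=<array-specific URL>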
+package org.apache.cloudstack.storage.datastore.provider; + +import org.apache.cloudstack.storage.datastore.adapter.primera.PrimeraAdapterFactory; + +public class PrimeraPrimaryDatastoreProviderImpl extends AdaptivePrimaryDatastoreProviderImpl { + + public PrimeraPrimaryDatastoreProviderImpl() { + super(new PrimeraAdapterFactory()); + } + + @Override + public String getName() { + return "Primera"; + } + +} diff --git a/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/module.properties b/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/module.properties new file mode 100644 index 00000000000..8b4bb66df78 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=storage-volume-primera +parent=storage diff --git a/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/spring-storage-volume-primera-context.xml b/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/spring-storage-volume-primera-context.xml new file mode 100644 index 00000000000..92f0bf23394 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/spring-storage-volume-primera-context.xml @@ -0,0 +1,35 @@ + + + + + + diff --git a/scripts/storage/multipath/cleanStaleMaps.sh b/scripts/storage/multipath/cleanStaleMaps.sh new file mode 100644 index 00000000000..90b9bef5a8d --- /dev/null +++ b/scripts/storage/multipath/cleanStaleMaps.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
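+# Illustration, not part of the script: the loop below parses the output of
+# `multipathd list maps status`, which looks roughly like this (column layout
+# can vary by multipath-tools version; "paths" is the 4th field, and
+# substr($1,2) strips the leading "3" NAA prefix that the companion scripts
+# add back as /dev/mapper/3${WWID}):
+#
+#   name                              failback queueing paths dm-st  write_prot
+#   360002ac0000000000000001f0001d9ca -        off      0     active rw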
+ +############################################################################################# +# +# Clean old multipath maps that have 0 paths available +# +############################################################################################# + +cd $(dirname $0) + +for WWID in $(multipathd list maps status | awk '{ if ($4 == 0) { print substr($1,2); }}'); do + ./removeVolume.sh ${WWID} +done + +exit 0 diff --git a/scripts/storage/multipath/connectVolume.sh b/scripts/storage/multipath/connectVolume.sh new file mode 100644 index 00000000000..fb8387ece47 --- /dev/null +++ b/scripts/storage/multipath/connectVolume.sh @@ -0,0 +1,133 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +##################################################################################### +# +# Given a lun # and a WWID for a volume provisioned externally, find the volume +# through the SCSI bus and make sure it is visible via multipath +# +##################################################################################### + + +LUN=${1:?"LUN required"} +WWID=${2:?"WWID required"} + +WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]') + +systemctl is-active multipathd || systemctl restart multipathd || { + echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume." + logger -t "CS_SCSI_VOL_FIND" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started" + exit 1 +} + +echo "$(date): Looking for ${WWID} on lun ${LUN}" + +# get vendor OUI. We will only delete a device on the designated lun if it matches the +# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the +# host on different Fibre Channel hosts with the same LUN +INCOMING_OUI=$(echo ${WWID} | cut -c2-7) +echo "$(date): Incoming OUI: ${INCOMING_OUI}" + +# first we need to check if any stray references are left from a previous use of this lun +for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do + lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g') + + if [ !
-z "${lingering_devs}" ]; then + for dev in ${lingering_devs}; do + LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}' | sed -e 's/0x/3/g') + FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8) + if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then + continue; + fi + dev=$(echo $dev | awk -F: '{ print $1}') + logger -t "CS_SCSI_VOL_FIND" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up" + MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}') + MP_WWID=${MP_WWID:1} # strip first character (3) off + # don't do this if the WWID passed in matches the WWID from multipath + if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then + # run full removal again so all devices and multimap are cleared + $(dirname $0)/disconnectVolume.sh ${MP_WWID} + # we don't have a multimap but we may still have some stranded devices to clean up + elif [ "${LSSCSI_WWID}" != "${WWID}" ]; then + echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete + fi + done + sleep 3 + fi +done + +logger -t "CS_SCSI_VOL_FIND" "${WWID} awaiting disk path at /dev/mapper/3${WWID}" + +# wait for multipath to map the new lun to the WWID +echo "$(date): Waiting for multipath entry to show up for the WWID" +while true; do + ls /dev/mapper/3${WWID} >/dev/null 2>&1 + if [ $? == 0 ]; then + break + fi + + logger -t "CS_SCSI_VOL_FIND" "${WWID} not available yet, triggering scan" + + # instruct bus to scan for new lun + for fchost in $(ls /sys/class/fc_host); do + echo " --> Scanning ${fchost}" + echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan + done + + multipath -v2 2>/dev/null + + ls /dev/mapper/3${WWID} >/dev/null 2>&1 + if [ $? == 0 ]; then + break + fi + + sleep 5 +done + +echo "$(date): Doing a recan to make sure we have proper current size locally" +for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do + echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan; +done + +sleep 3 + +multipathd reconfigure + +sleep 3 + +# cleanup any old/faulty paths +delete_needed=false +multipath -l 3${WWID} +for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do + logger -t "CS_SCSI_VOL_FIND" "${WWID} multipath contains faulty path ${dev}, removing" + echo 1 > /sys/block/${dev}/device/delete; + delete_needed=true +done + +if [ "${delete_needed}" == "true" ]; then + sleep 10 + multipath -v2 >/dev/null +fi + +multipath -l 3${WWID} + +logger -t "CS_SCSI_VOL_FIND" "${WWID} successfully discovered and available" + +echo "$(date): Complete - found mapped LUN at /dev/mapper/3${WWID}" + +exit 0 diff --git a/scripts/storage/multipath/copyVolume.sh b/scripts/storage/multipath/copyVolume.sh new file mode 100644 index 00000000000..d169198251b --- /dev/null +++ b/scripts/storage/multipath/copyVolume.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +OUTPUT_FORMAT=${1:?"Output format is required"} +INPUT_FILE=${2:?"Input file/path is required"} +OUTPUT_FILE=${3:?"Output file/path is required"} + +echo "$(date): qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE}" + +qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && { + # if it's a block device, make sure we flush caches before exiting + lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && { + blockdev --flushbufs ${OUTPUT_FILE} + hdparm -F ${OUTPUT_FILE} + } + exit 0 +} diff --git a/scripts/storage/multipath/disconnectVolume.sh b/scripts/storage/multipath/disconnectVolume.sh new file mode 100644 index 00000000000..067e561f8a3 --- /dev/null +++ b/scripts/storage/multipath/disconnectVolume.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +######################################################################################### +# +# Given a WWID, clean up/remove any multipath maps and devices associated with it. This +# may not always have a lasting result, because if the storage array still has the volume +# visible to the host, it may be rediscovered. The cleanupStaleMaps.sh script should +# catch those cases +# +######################################################################################### + +WWID=${1:?"WWID required"} +WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]') + +echo "$(date): Removing ${WWID}" + +systemctl is-active multipathd || systemctl restart multipathd || { + echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume." + logger -t "CS_SCSI_VOL_REMOVE" "${WWID} cannot be disconnected from this host because multipathd is not currently running and cannot be started" + exit 1 +} + +# first get dm- name +DM_NAME=$(ls -lrt /dev/mapper/3${WWID} | awk '{ print $NF }' | awk -F'/' '{print $NF}') +SLAVE_DEVS="" +if [ -z "${DM_NAME}" ]; then + logger -t CS_SCSI_VOL_REMOVE "${WWID} has no active multipath map, attempting direct cleanup of any leftover mapping" + logger -t CS_SCSI_VOL_REMOVE "WARN: dm name could not be found for ${WWID}" + dmsetup remove /dev/mapper/*${WWID} + logger -t CS_SCSI_VOL_REMOVE "${WWID} removal via dmsetup remove /dev/mapper/*${WWID} finished with return code $?" +else + # now look for slave devices and save for deletion + for dev in $(ls /sys/block/${DM_NAME}/slaves/ 2>/dev/null); do + SLAVE_DEVS="${SLAVE_DEVS} ${dev}" + done +fi + +# delete the path map last +multipath -f 3${WWID} + +# now delete slave devices +# https://bugzilla.redhat.com/show_bug.cgi?id=1949369 +if [ !
-z "${SLAVE_DEVS}" ]; then + for dev in ${SLAVE_DEVS}; do + multipathd del path /dev/${dev} + echo "1" > /sys/block/${dev}/device/delete + logger -t CS_SCSI_VOL_REMOVE "${WWID} removal of device ${dev} complete" + done +fi + +logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices" + +echo "$(date): ${WWID} removed" + +exit 0 diff --git a/scripts/storage/multipath/resizeVolume.sh b/scripts/storage/multipath/resizeVolume.sh new file mode 100644 index 00000000000..1b44a71b46a --- /dev/null +++ b/scripts/storage/multipath/resizeVolume.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +notifyqemu() { + if `virsh help 2>/dev/null | grep -q blockresize` + then + if `virsh domstate $VMNAME >/dev/null 2>&1` + then + sizeinkb=$(($NEWSIZE/1024)) + devicepath=$(virsh domblklist $VMNAME | grep ${WWID} | awk '{print $1}') + virsh blockresize --path $devicepath --size $sizeinkb ${VMNAME} >/dev/null 2>&1 + retval=$? + if [ -z $retval ] || [ $retval -ne 0 ] + then + log "failed to live resize $path to size of $sizeinkb kb" 1 + else + liveresize='true' + fi + fi + fi +} + +WWID=${1:?"WWID required"} +VMNAME=${2:?"VMName required"} +NEWSIZE=${3:?"New size required in bytes"} + +WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]') + +export WWID VMNAME NEWSIZE + +systemctl is-active multipathd || systemctl restart multipathd || { + echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume." + logger -t "CS_SCSI_VOL_RESIZE" "Unable to notify running VM of resize for ${WWID} because multipathd is not currently running and cannot be started" + exit 1 +} + +logger -t "CS_SCSI_VOL_RESIZE" "${WWID} resizing disk path at /dev/mapper/3${WWID} STARTING" + +for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do + echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan; +done + +sleep 3 + +multipathd reconfigure + +sleep 3 + +multipath -ll 3${WWID} + +notifyqemu + +logger -t "CS_SCSI_VOL_RESIZE" "${WWID} resizing disk path at /dev/mapper/3${WWID} COMPLETE" + +exit 0 diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index a3532a79af4..3cade046c74 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -1294,7 +1294,7 @@ public class ApiDBUtils { type = HypervisorType.Hyperv; } } if (format == ImageFormat.RAW) { - // Currently, KVM only supports RBD and PowerFlex images of type RAW. + // Currently, KVM only supports RBD, PowerFlex, and FiberChannel images of type RAW. 
// This results in a weird collision with OVM volumes which // can only be raw, thus making KVM RBD volumes show up as OVM // rather than RBD. This block of code can (hopefully) by checking to @@ -1306,10 +1306,12 @@ ListIterator itr = pools.listIterator(); while(itr.hasNext()) { StoragePoolVO pool = itr.next(); - if(pool.getPoolType() == StoragePoolType.RBD || - pool.getPoolType() == StoragePoolType.PowerFlex || - pool.getPoolType() == StoragePoolType.CLVM || - pool.getPoolType() == StoragePoolType.Linstor) { + + if(List.of(StoragePoolType.RBD, + StoragePoolType.PowerFlex, + StoragePoolType.CLVM, + StoragePoolType.Linstor, + StoragePoolType.FiberChannel).contains(pool.getPoolType())) { // This case will note the presence of non-qcow2 primary stores, suggesting KVM without NFS. Otherwise, // If this check is not passed, the hypervisor type will remain OVM. type = HypervisorType.KVM; diff --git a/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java b/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java index 45d4ed7f773..009d88a983b 100644 --- a/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java +++ b/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java @@ -92,7 +92,9 @@ public class ParamGenericValidationWorker implements DispatchWorker { break; } } - if (!matchedCurrentParam && !((String)actualParamName).equalsIgnoreCase("expires") && !((String)actualParamName).equalsIgnoreCase("signatureversion")) { + if (!matchedCurrentParam && !((String)actualParamName).equalsIgnoreCase("expires") && + !((String)actualParamName).equalsIgnoreCase("signatureversion") && + !((String)actualParamName).equalsIgnoreCase("projectid")) { errorMsg.append(" ").append(actualParamName); foundUnknownParam= true; } diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 91410198e2f..c885bce1afc 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -114,6 +114,9 @@ import com.cloud.org.Cluster; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.serializer.GsonHelper; +import com.cloud.server.StatsCollector.AbstractStatsCollector; +import com.cloud.server.StatsCollector.AutoScaleMonitor; +import com.cloud.server.StatsCollector.StorageCollector; import com.cloud.storage.ImageStoreDetailsUtil; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; @@ -1620,7 +1623,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc for (StoragePoolVO pool : pools) { List volumes = _volsDao.findByPoolId(pool.getId(), null); for (VolumeVO volume : volumes) { - if (volume.getFormat() != ImageFormat.QCOW2 && volume.getFormat() != ImageFormat.VHD && volume.getFormat() != ImageFormat.OVA && (volume.getFormat() != ImageFormat.RAW || pool.getPoolType() != Storage.StoragePoolType.PowerFlex)) { + if (!List.of(ImageFormat.QCOW2, ImageFormat.VHD, ImageFormat.OVA, ImageFormat.RAW).contains(volume.getFormat()) && + !List.of(Storage.StoragePoolType.PowerFlex, Storage.StoragePoolType.FiberChannel).contains(pool.getPoolType())) { LOGGER.warn("Volume stats not implemented for this format type " + volume.getFormat()); break; } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index
481c200c49d..4bc471c5084 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -1052,36 +1052,56 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _storagePoolTagsDao.persist(pool.getId(), storagePoolTags, cmd.isTagARule()); } + boolean changes = false; Long updatedCapacityBytes = null; Long capacityBytes = cmd.getCapacityBytes(); if (capacityBytes != null) { if (capacityBytes != pool.getCapacityBytes()) { updatedCapacityBytes = capacityBytes; + changes = true; } } Long updatedCapacityIops = null; Long capacityIops = cmd.getCapacityIops(); - if (capacityIops != null) { if (!capacityIops.equals(pool.getCapacityIops())) { updatedCapacityIops = capacityIops; + changes = true; } } - if (updatedCapacityBytes != null || updatedCapacityIops != null) { + // retrieve current details and merge/overlay input to capture changes + Map inputDetails = extractApiParamAsMap(cmd.getDetails()); + Map details = null; + if (inputDetails == null) { + details = _storagePoolDetailsDao.listDetailsKeyPairs(id); + } else { + details = _storagePoolDetailsDao.listDetailsKeyPairs(id); + details.putAll(inputDetails); + changes = true; + } + + if (changes) { StoragePoolVO storagePool = _storagePoolDao.findById(id); DataStoreProvider dataStoreProvider = _dataStoreProviderMgr.getDataStoreProvider(storagePool.getStorageProviderName()); DataStoreLifeCycle dataStoreLifeCycle = dataStoreProvider.getDataStoreLifeCycle(); if (dataStoreLifeCycle instanceof PrimaryDataStoreLifeCycle) { - Map details = new HashMap(); - - details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, updatedCapacityBytes != null ? String.valueOf(updatedCapacityBytes) : null); - details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, updatedCapacityIops != null ? String.valueOf(updatedCapacityIops) : null); - - ((PrimaryDataStoreLifeCycle)dataStoreLifeCycle).updateStoragePool(storagePool, details); + if (updatedCapacityBytes != null) { + details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, updatedCapacityBytes != null ? String.valueOf(updatedCapacityBytes) : null); + _storagePoolDao.updateCapacityBytes(id, updatedCapacityBytes); + } + if (updatedCapacityIops != null) { + details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, updatedCapacityIops != null ? 
String.valueOf(updatedCapacityIops) : null); + _storagePoolDao.updateCapacityIops(id, updatedCapacityIops); + } + if (cmd.getUrl() != null) { + details.put("url", cmd.getUrl()); + } + _storagePoolDao.update(id, storagePool); + _storagePoolDao.updateDetails(id, details); } } @@ -1094,14 +1114,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - if (updatedCapacityBytes != null) { - _storagePoolDao.updateCapacityBytes(id, capacityBytes); - } - - if (updatedCapacityIops != null) { - _storagePoolDao.updateCapacityIops(id, capacityIops); - } - return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 69b5f984081..ba24ea3be54 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -1253,7 +1253,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (storagePoolId != null) { StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); - if (storagePoolVO.isManaged() && !storagePoolVO.getPoolType().equals(Storage.StoragePoolType.PowerFlex)) { + if (storagePoolVO.isManaged() && !List.of( + Storage.StoragePoolType.PowerFlex, + Storage.StoragePoolType.FiberChannel).contains(storagePoolVO.getPoolType())) { Long instanceId = volume.getInstanceId(); if (instanceId != null) { diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index e6650e9bb51..c798ed8dc0f 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -1551,6 +1551,10 @@ "label.presetup": "PreSetup", "label.prev": "Prev", "label.previous": "Previous", +"label.primera.username.tooltip": "The username with edit privileges", +"label.primera.url.tooltip": "URL designating the Primera storage array endpoint, formatted as: http[s]://HOSTNAME:PORT?cpg=NAME&hostset=NAME[&skipTlsValidation=true][&snapCPG=NAME][&taskWaitTimeoutMs=#][&keyttl=#][&connectTimeoutMs=#] where values in [] are optional.", +"label.flashArray.username.tooltip": "The username with edit privileges", +"label.flashArray.url.tooltip": "URL designating the Flash Array endpoint, formatted as: http[s]://HOSTNAME:PORT?pod=NAME&hostgroup=NAME[&skipTlsValidation=true][&postCopyWaitMs=#][&keyttl=#][&connectTimeoutMs=#][&apiLoginVersion=#][&apiVersion=#] where values in [] are optional.", "label.primary": "Primary", "label.primary.storage": "Primary storage", "label.primary.storage.allocated": "Primary storage allocated", diff --git a/ui/src/views/infra/AddPrimaryStorage.vue b/ui/src/views/infra/AddPrimaryStorage.vue index 91c0dcbf42e..730a806307c 100644 --- a/ui/src/views/infra/AddPrimaryStorage.vue +++ b/ui/src/views/infra/AddPrimaryStorage.vue @@ -231,7 +231,7 @@ -
[The Vue template markup in this AddPrimaryStorage.vue hunk was stripped during extraction, leaving only bare diff markers; judging by the en.json entries above, the added elements correspond to the Primera and Flash Array URL/username fields and their tooltips.]
{{ $t('label.cancel') }} diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java index 8301dd2a561..3a8aa085f97 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java @@ -56,6 +56,7 @@ import com.vmware.vim25.NasDatastoreInfo; import com.vmware.vim25.VMwareDVSPortSetting; import com.vmware.vim25.VirtualDeviceFileBackingInfo; import com.vmware.vim25.VirtualIDEController; +import com.vmware.vim25.VirtualMachineConfigSummary; import com.vmware.vim25.VirtualMachineGuestOsIdentifier; import com.vmware.vim25.VirtualMachineToolsStatus; import com.vmware.vim25.VirtualSCSIController; @@ -797,18 +798,21 @@ public class VmwareHelper { instance = new UnmanagedInstanceTO(); instance.setName(vmMo.getVmName()); instance.setInternalCSName(vmMo.getInternalCSName()); - instance.setCpuCores(vmMo.getConfigSummary().getNumCpu()); instance.setCpuCoresPerSocket(vmMo.getCoresPerSocket()); - instance.setCpuSpeed(vmMo.getConfigSummary().getCpuReservation()); - instance.setMemory(vmMo.getConfigSummary().getMemorySizeMB()); instance.setOperatingSystemId(vmMo.getVmGuestInfo().getGuestId()); + VirtualMachineConfigSummary configSummary = vmMo.getConfigSummary(); + if (configSummary != null) { + instance.setCpuCores(configSummary.getNumCpu()); + instance.setCpuSpeed(configSummary.getCpuReservation()); + instance.setMemory(configSummary.getMemorySizeMB()); + } ClusterMO clusterMo = new ClusterMO(hyperHost.getContext(), hyperHost.getHyperHostCluster()); instance.setClusterName(clusterMo.getName()); instance.setHostName(hyperHost.getHyperHostName()); - if (StringUtils.isEmpty(instance.getOperatingSystemId())) { - instance.setOperatingSystemId(vmMo.getConfigSummary().getGuestId()); + if (StringUtils.isEmpty(instance.getOperatingSystemId()) && configSummary != null) { + instance.setOperatingSystemId(configSummary.getGuestId()); } VirtualMachineGuestOsIdentifier osIdentifier = VirtualMachineGuestOsIdentifier.OTHER_GUEST; try { @@ -819,8 +823,8 @@ public class VmwareHelper { } } instance.setOperatingSystem(vmMo.getGuestInfo().getGuestFullName()); - if (StringUtils.isEmpty(instance.getOperatingSystem())) { - instance.setOperatingSystem(vmMo.getConfigSummary().getGuestFullName()); + if (StringUtils.isEmpty(instance.getOperatingSystem()) && configSummary != null) { + instance.setOperatingSystem(configSummary.getGuestFullName()); } UnmanagedInstanceTO.PowerState powerState = UnmanagedInstanceTO.PowerState.PowerUnknown; if (vmMo.getPowerState().toString().equalsIgnoreCase("POWERED_ON")) { From 3bb318bab905d974f4613c637dee1d6e5d7ecb32 Mon Sep 17 00:00:00 2001 From: Bryan Lima <42067040+BryanMLima@users.noreply.github.com> Date: Wed, 13 Dec 2023 02:21:24 -0300 Subject: [PATCH 20/22] kvm: Add support for cgroupv2 (#8252) 1. Problem description In Apache CloudStack (ACS), when a VM is deployed in a host with the KVM hypervisor, an XML file is created in the assigned host, which has a property shares that defines the weight of the VM to access the host CPU. The value of this property has no unit, and it is a relative measure to calculate how much CPU a given VM will have in the host. However, this value has a limit, which depends on the version of cgroup utilized by the host's kernel. 
The problem lies in the range of valid shares values, which differs between the two versions: [2, 262144] for cgroups version 1, and [1, 10000] for cgroups version 2. Currently, ACS calculates the value of shares using Equation 1, presented below, where CPU is the number of cores and speed is the CPU frequency, both specified in the VM's compute offering. Therefore, if a compute offering has, for example, 6 cores at 2 GHz, the shares value will be 12000 and an exception will be thrown by libvirt if the host utilizes cgroup v2. The second version is becoming the default in current Linux distributions; thus, it is necessary to address this limitation. Equation 1: shares = CPU * speed. Fixes: #6744. 2. Proposed changes To address the problem described, we propose to apply a scale conversion considering the max shares of the host. Using the same formula currently utilized by ACS, it is possible to calculate the maximum shares for a given host; in other words, the number of cores and the nominal speed of the host's CPU define the upper limit of shares allowed to a VM. This value is then scaled into the allowed cgroup v2 interval of [1, 10000] using a linear scale conversion. The VM shares are calculated with Equation 2, presented below, where VM requested shares is the requested shares value calculated using Equation 1, cgroup upper limit is fixed at 10000 (the cgroup v2 upper limit), and host max shares is the maximum shares value of the host, also calculated using Equation 1. Using Equation 2, the only case where a VM exceeds the cgroup v2 limit is when the user requests more resources than the host has, which is not possible with the current implementation of ACS. Equation 2: shares = (VM requested shares * cgroup upper limit) / host max shares. To implement the proposal, the following APIs will be updated: deployVirtualMachine, migrateVirtualMachine and scaleVirtualMachine. When a VM is being deployed, a new verification will be added to find a suitable host: the max shares of each host will be calculated, and the VM's calculated shares will be checked to ensure they do not surpass the host's value. Likewise, the migration of VMs will have a similar new verification. Lastly, the scaling of VMs will have the same verification for the VM's host. To determine the max shares of a given host, we will use the same equation currently used in ACS for calculating the shares of VMs, presented in Section 1. When Equation 1 is used to determine the maximum shares of a host, CPU is the number of cores of the host, and speed is the nominal CPU speed, i.e., the CPU's base frequency. It is important to note that, for now, these changes apply only to hosts with the KVM hypervisor using cgroup v2.
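As a concrete, back-of-the-envelope illustration of Equation 2 (this is not the patch's code; the actual implementation is LibvirtComputingResource#calculateCpuShares further below), consider a hypothetical host with 16 cores at a nominal 2500 MHz and the 6-core / 2 GHz offering from the example above. A minimal, self-contained Java sketch of the two equations:

public class CpuSharesExample {
    private static final int CGROUP_V2_UPPER_LIMIT = 10000;

    // Equation 1: shares = CPU cores * CPU frequency (in MHz)
    static int equation1(int cores, int speedMhz) {
        return cores * speedMhz;
    }

    // Equation 2: scale the requested shares into cgroup v2's [1, 10000] cpu.weight range
    static int equation2(int vmRequestedShares, int hostMaxShares) {
        return (int) Math.ceil(vmRequestedShares * (double) CGROUP_V2_UPPER_LIMIT / hostMaxShares);
    }

    public static void main(String[] args) {
        int hostMaxShares = equation1(16, 2500); // hypothetical host: 16 cores @ 2500 MHz -> 40000
        int vmShares = equation1(6, 2000);       // the offering from the example above -> 12000
        // 12000 exceeds the cgroup v2 ceiling of 10000; scaled, it becomes 3000
        System.out.println(equation2(vmShares, hostMaxShares)); // prints 3000
    }
}

The scaled value of 3000 out of 10000 weight units represents the same 30% of host capacity that 12000 out of 40000 expressed under cgroup v1, so the VM's relative scheduling weight is preserved.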
--- .../com/cloud/agent/api/MigrateCommand.java | 11 +++ .../agent/api/PrepareForMigrationAnswer.java | 10 ++ .../cloud/vm/VirtualMachineManagerImpl.java | 67 +++++++------ .../StorageSystemDataMotionStrategy.java | 13 ++- .../resource/LibvirtComputingResource.java | 82 +++++++++++++++- .../wrapper/LibvirtMigrateCommandWrapper.java | 41 ++++++++ ...virtPrepareForMigrationCommandWrapper.java | 22 ++++- .../wrapper/LibvirtScaleVmCommandWrapper.java | 3 +- .../LibvirtComputingResourceTest.java | 93 +++++++++++++++++++ .../LibvirtMigrateCommandWrapperTest.java | 80 ++++++++++++++++ ...PrepareForMigrationCommandWrapperTest.java | 75 +++++++++++++++ .../LibvirtScaleVmCommandWrapperTest.java | 5 + 12 files changed, 463 insertions(+), 39 deletions(-) create mode 100644 plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapperTest.java diff --git a/core/src/main/java/com/cloud/agent/api/MigrateCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateCommand.java index 27251f4bb78..3acdb9c351b 100644 --- a/core/src/main/java/com/cloud/agent/api/MigrateCommand.java +++ b/core/src/main/java/com/cloud/agent/api/MigrateCommand.java @@ -40,6 +40,9 @@ public class MigrateCommand extends Command { private boolean executeInSequence = false; private List migrateDiskInfoList = new ArrayList<>(); private Map dpdkInterfaceMapping = new HashMap<>(); + + private int newVmCpuShares; + Map vlanToPersistenceMap = new HashMap<>(); public Map getDpdkInterfaceMapping() { @@ -138,6 +141,14 @@ public class MigrateCommand extends Command { this.migrateDiskInfoList = migrateDiskInfoList; } + public int getNewVmCpuShares() { + return newVmCpuShares; + } + + public void setNewVmCpuShares(int newVmCpuShares) { + this.newVmCpuShares = newVmCpuShares; + } + public static class MigrateDiskInfo { public enum DiskType { FILE, BLOCK; diff --git a/core/src/main/java/com/cloud/agent/api/PrepareForMigrationAnswer.java b/core/src/main/java/com/cloud/agent/api/PrepareForMigrationAnswer.java index d0a544ba081..190e844ddc5 100644 --- a/core/src/main/java/com/cloud/agent/api/PrepareForMigrationAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/PrepareForMigrationAnswer.java @@ -28,6 +28,8 @@ public class PrepareForMigrationAnswer extends Answer { private Map dpdkInterfaceMapping = new HashMap<>(); + private Integer newVmCpuShares = null; + protected PrepareForMigrationAnswer() { } @@ -50,4 +52,12 @@ public class PrepareForMigrationAnswer extends Answer { public Map getDpdkInterfaceMapping() { return this.dpdkInterfaceMapping; } + + public Integer getNewVmCpuShares() { + return newVmCpuShares; + } + + public void setNewVmCpuShares(Integer newVmCpuShares) { + this.newVmCpuShares = newVmCpuShares; + } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 7792afd2c63..4c8883476a2 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -48,6 +48,7 @@ import javax.naming.ConfigurationException; import javax.persistence.EntityExistsException; import com.cloud.event.ActionEventUtils; +import com.google.gson.Gson; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; @@ -2790,23 +2791,9 @@ public class 
VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } boolean migrated = false; - Map dpdkInterfaceMapping = null; + Map dpdkInterfaceMapping = new HashMap<>(); try { - final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); - Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId()); - final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType())); - if (MapUtils.isNotEmpty(vlanToPersistenceMap)) { - mc.setVlanToPersistenceMap(vlanToPersistenceMap); - } - - boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value(); - mc.setAutoConvergence(kvmAutoConvergence); - mc.setHostGuid(dest.getHost().getGuid()); - - dpdkInterfaceMapping = ((PrepareForMigrationAnswer) pfma).getDpdkInterfaceMapping(); - if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { - mc.setDpdkInterfaceMapping(dpdkInterfaceMapping); - } + final MigrateCommand mc = buildMigrateCommand(vm, to, dest, pfma, dpdkInterfaceMapping); try { final Answer ma = _agentMgr.send(vm.getLastHostId(), mc); @@ -2878,6 +2865,43 @@ VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } + /** + * Create and set parameters for the {@link MigrateCommand} used in the migration and scaling of VMs. + */ + protected MigrateCommand buildMigrateCommand(VMInstanceVO vmInstance, VirtualMachineTO virtualMachineTO, DeployDestination destination, Answer answer, + Map dpdkInterfaceMapping) { + final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vmInstance.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); + final MigrateCommand migrateCommand = new MigrateCommand(vmInstance.getInstanceName(), destination.getHost().getPrivateIpAddress(), isWindows, virtualMachineTO, + getExecuteInSequence(vmInstance.getHypervisorType())); + + Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vmInstance.getId()); + if (MapUtils.isNotEmpty(vlanToPersistenceMap)) { + s_logger.debug(String.format("Setting VLAN persistence to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), virtualMachineTO)); + migrateCommand.setVlanToPersistenceMap(vlanToPersistenceMap); + } + + migrateCommand.setAutoConvergence(StorageManager.KvmAutoConvergence.value()); + migrateCommand.setHostGuid(destination.getHost().getGuid()); + + PrepareForMigrationAnswer prepareForMigrationAnswer = (PrepareForMigrationAnswer) answer; + + Map answerDpdkInterfaceMapping = prepareForMigrationAnswer.getDpdkInterfaceMapping(); + if (MapUtils.isNotEmpty(answerDpdkInterfaceMapping) && dpdkInterfaceMapping != null) { + s_logger.debug(String.format("Setting DPDK interface mapping to [%s] as part of migrate command for VM [%s].", new Gson().toJson(answerDpdkInterfaceMapping), + virtualMachineTO)); + dpdkInterfaceMapping.putAll(answerDpdkInterfaceMapping); + migrateCommand.setDpdkInterfaceMapping(dpdkInterfaceMapping); + } + + Integer newVmCpuShares = prepareForMigrationAnswer.getNewVmCpuShares(); + if (newVmCpuShares != null) { + s_logger.debug(String.format("Setting CPU shares to [%d] as part of migrate command for VM [%s].", newVmCpuShares, virtualMachineTO)); + migrateCommand.setNewVmCpuShares(newVmCpuShares); + } + + return migrateCommand; + } + private void updateVmPod(VMInstanceVO vm, long dstHostId) { // update the VMs pod HostVO host = _hostDao.findById(dstHostId); @@ -4395,16 +4419,7
@@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac boolean migrated = false; try { - Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId()); - final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); - final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType())); - if (MapUtils.isNotEmpty(vlanToPersistenceMap)) { - mc.setVlanToPersistenceMap(vlanToPersistenceMap); - } - - boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value(); - mc.setAutoConvergence(kvmAutoConvergence); - mc.setHostGuid(dest.getHost().getGuid()); + final MigrateCommand mc = buildMigrateCommand(vm, to, dest, pfma, null); try { final Answer ma = _agentMgr.send(vm.getLastHostId(), mc); diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 1419ae36d25..a63aa52799d 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; +import com.cloud.agent.api.PrepareForMigrationAnswer; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -1884,9 +1885,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO); + Answer pfma; try { - Answer pfma = agentManager.send(destHost.getId(), pfmc); + pfma = agentManager.send(destHost.getId(), pfmc); if (pfma == null || !pfma.getResult()) { String details = pfma != null ? 
pfma.getDetails() : "null answer returned"; @@ -1894,8 +1896,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new AgentUnavailableException(msg, destHost.getId()); } - } - catch (final OperationTimedoutException e) { + } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Operation timed out", destHost.getId()); } @@ -1911,6 +1912,12 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { migrateCommand.setMigrateStorageManaged(managedStorageDestination); migrateCommand.setMigrateNonSharedInc(migrateNonSharedInc); + Integer newVmCpuShares = ((PrepareForMigrationAnswer) pfma).getNewVmCpuShares(); + if (newVmCpuShares != null) { + LOGGER.debug(String.format("Setting CPU shares to [%d] as part of migrate VM with volumes command for VM [%s].", newVmCpuShares, vmTO)); + migrateCommand.setNewVmCpuShares(newVmCpuShares); + } + boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value(); migrateCommand.setAutoConvergence(kvmAutoConvergence); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index b7611cd07bb..a3bee2f4134 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -72,6 +72,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang.math.NumberUtils; +import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import org.apache.xerces.impl.xpath.regex.Match; @@ -472,6 +473,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv */ private static final String COMMAND_SET_MEM_BALLOON_STATS_PERIOD = "virsh dommemstat %s --period %s --live"; + private static int hostCpuMaxCapacity = 0; + + private static final int CGROUP_V2_UPPER_LIMIT = 10000; + + private static final String COMMAND_GET_CGROUP_HOST_VERSION = "stat -fc %T /sys/fs/cgroup/"; + + public static final String CGROUP_V2 = "cgroup2fs"; + protected long getHypervisorLibvirtVersion() { return _hypervisorLibvirtVersion; } @@ -547,6 +556,18 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return new ExecutionResult(true, null); } + /** + * @return the host CPU max capacity according to the method {@link LibvirtComputingResource#calculateHostCpuMaxCapacity(int, Long)}; if the host utilizes cgroup v1, this + * value is 0. + */ + public int getHostCpuMaxCapacity() { + return hostCpuMaxCapacity; + } + + public void setHostCpuMaxCapacity(int hostCpuMaxCapacity) { + LibvirtComputingResource.hostCpuMaxCapacity = hostCpuMaxCapacity; + } + public LibvirtKvmAgentHook getTransformer() throws IOException { return new LibvirtKvmAgentHook(_agentHooksBasedir, _agentHooksLibvirtXmlScript, _agentHooksLibvirtXmlMethod); } @@ -2673,12 +2694,41 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv */ protected CpuTuneDef createCpuTuneDef(VirtualMachineTO vmTO) { CpuTuneDef ctd = new CpuTuneDef(); - int shares = vmTO.getCpus() * (vmTO.getMinSpeed() != null ? 
vmTO.getMinSpeed() : vmTO.getSpeed()); - ctd.setShares(shares); + ctd.setShares(calculateCpuShares(vmTO)); setQuotaAndPeriod(vmTO, ctd); return ctd; } + /** + * Calculates the VM CPU shares considering the cgroup version of the host. + *
+ * • If the host utilizes cgroup v1, the CPU shares value is calculated as VM CPU shares = CPU cores * CPU frequency. + * • If the host utilizes cgroup v2, the CPU shares calculation considers the cgroup v2 upper limit of 10,000, and a linear scale conversion is applied considering the maximum host CPU shares (i.e., using the number of CPU cores and the nominal CPU frequency of the host). Therefore, the VM CPU shares value is calculated as VM CPU shares = (VM requested shares * cgroup upper limit) / host max shares.
+ */ + public int calculateCpuShares(VirtualMachineTO vmTO) { + int vCpus = vmTO.getCpus(); + int cpuSpeed = ObjectUtils.defaultIfNull(vmTO.getMinSpeed(), vmTO.getSpeed()); + int requestedCpuShares = vCpus * cpuSpeed; + int hostCpuMaxCapacity = getHostCpuMaxCapacity(); + + if (hostCpuMaxCapacity > 0) { + int updatedCpuShares = (int) Math.ceil((requestedCpuShares * CGROUP_V2_UPPER_LIMIT) / (double) hostCpuMaxCapacity); + s_logger.debug(String.format("This host utilizes cgroupv2 (as the max shares value is [%s]), thus, the VM requested shares of [%s] will be converted to " + + "consider the host limits; the new CPU shares value is [%s].", hostCpuMaxCapacity, requestedCpuShares, updatedCpuShares)); + return updatedCpuShares; + } + s_logger.debug(String.format("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [%s] will not be " + + "converted.", requestedCpuShares)); + return requestedCpuShares; + } + private CpuModeDef createCpuModeDef(VirtualMachineTO vmTO, int vcpus) { final CpuModeDef cmd = new CpuModeDef(); cmd.setMode(_guestCpuMode); @@ -3469,8 +3519,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv @Override public StartupCommand[] initialize() { - final KVMHostInfo info = new KVMHostInfo(_dom0MinMem, _dom0OvercommitMem, _manualCpuSpeed); + calculateHostCpuMaxCapacity(info.getCpus(), info.getCpuSpeed()); String capabilities = String.join(",", info.getCapabilities()); if (dpdkSupport) { @@ -3514,6 +3564,32 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return startupCommandsArray; } + /** + * Calculates and sets the host CPU max capacity according to the cgroup version of the host. + *
+ * • cgroup v1: the max CPU capacity for the host is set to 0. + * • cgroup v2: the max CPU capacity for the host is the value of cpuCores * cpuSpeed.
+ */ + protected void calculateHostCpuMaxCapacity(int cpuCores, Long cpuSpeed) { + String output = Script.runSimpleBashScript(COMMAND_GET_CGROUP_HOST_VERSION); + s_logger.info(String.format("Host uses control group [%s].", output)); + + if (!CGROUP_V2.equals(output)) { + s_logger.info(String.format("Setting host CPU max capacity to 0, as it uses cgroup v1.", getHostCpuMaxCapacity())); + setHostCpuMaxCapacity(0); + return; + } + + s_logger.info(String.format("Calculating the max shares of the host.")); + setHostCpuMaxCapacity(cpuCores * cpuSpeed.intValue()); + s_logger.info(String.format("The max shares of the host is [%d].", getHostCpuMaxCapacity())); + } + private StartupStorageCommand createLocalStoragePool(String localStoragePath, String localStorageUUID, StartupRoutingCommand cmd) { StartupStorageCommand sscmd = null; try { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java index d0ab77829af..fb526626ef8 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java @@ -23,6 +23,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; import java.util.Set; @@ -211,6 +212,8 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper + *
+ * • If both hosts utilize cgroup v1, the shares value of the VM is equal on both hosts, and there is no need to update the VM CPU shares value for the migration. + * • If at least one of the hosts utilizes cgroup v2, the VM CPU shares must be recalculated for the migration, according to the method {@link LibvirtComputingResource#calculateCpuShares(VirtualMachineTO)}.
  • + * + */ + protected String updateVmSharesIfNeeded(MigrateCommand migrateCommand, String xmlDesc, LibvirtComputingResource libvirtComputingResource) + throws ParserConfigurationException, IOException, SAXException, TransformerException { + Integer newVmCpuShares = migrateCommand.getNewVmCpuShares(); + int currentCpuShares = libvirtComputingResource.calculateCpuShares(migrateCommand.getVirtualMachine()); + + if (newVmCpuShares == currentCpuShares) { + s_logger.info(String.format("Current CPU shares [%s] is equal in both hosts; therefore, there is no need to update the CPU shares for the new host.", + currentCpuShares)); + return xmlDesc; + } + + InputStream inputStream = IOUtils.toInputStream(xmlDesc, StandardCharsets.UTF_8); + DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory(); + DocumentBuilder docBuilder = docFactory.newDocumentBuilder(); + Document document = docBuilder.parse(inputStream); + + Element root = document.getDocumentElement(); + Node sharesNode = root.getElementsByTagName("shares").item(0); + String currentShares = sharesNode.getTextContent(); + + s_logger.info(String.format("VM [%s] will have CPU shares altered from [%s] to [%s] as part of migration because the cgroups version differs between hosts.", + migrateCommand.getVmName(), currentShares, newVmCpuShares)); + sharesNode.setTextContent(String.valueOf(newVmCpuShares)); + return getXml(document); + } + /** * Replace DPDK source path and target before migrations */ diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java index 9109d579c5b..3f281e54bba 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java @@ -122,11 +122,7 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp return new PrepareForMigrationAnswer(command, "failed to connect physical disks to host"); } - PrepareForMigrationAnswer answer = new PrepareForMigrationAnswer(command); - if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { - answer.setDpdkInterfaceMapping(dpdkInterfaceMapping); - } - return answer; + return createPrepareForMigrationAnswer(command, dpdkInterfaceMapping, libvirtComputingResource, vm); } catch (final LibvirtException | CloudRuntimeException | InternalErrorException | URISyntaxException e) { if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { for (DpdkTO to : dpdkInterfaceMapping.values()) { @@ -143,6 +139,22 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp } } + protected PrepareForMigrationAnswer createPrepareForMigrationAnswer(PrepareForMigrationCommand command, Map dpdkInterfaceMapping, + LibvirtComputingResource libvirtComputingResource, VirtualMachineTO vm) { + PrepareForMigrationAnswer answer = new PrepareForMigrationAnswer(command); + + if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { + s_logger.debug(String.format("Setting DPDK interface for the migration of VM [%s].", vm)); + answer.setDpdkInterfaceMapping(dpdkInterfaceMapping); + } + + int newCpuShares = libvirtComputingResource.calculateCpuShares(vm); + s_logger.debug(String.format("Setting CPU shares to [%s] for the migration of VM [%s].", newCpuShares, vm)); + 
answer.setNewVmCpuShares(newCpuShares); + + return answer; + } + private Answer handleRollback(PrepareForMigrationCommand command, LibvirtComputingResource libvirtComputingResource) { KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); VirtualMachineTO vmTO = command.getVirtualMachine(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtScaleVmCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtScaleVmCommandWrapper.java index 963d13bff24..79d43ba2735 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtScaleVmCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtScaleVmCommandWrapper.java @@ -39,8 +39,7 @@ public class LibvirtScaleVmCommandWrapper extends CommandWrapper\n" + ""; + @Mock + MigrateCommand migrateCommandMock; + + @Mock + LibvirtComputingResource libvirtComputingResourceMock; + + @Mock + VirtualMachineTO virtualMachineTOMock; + + @Spy LibvirtMigrateCommandWrapper libvirtMigrateCmdWrapper = new LibvirtMigrateCommandWrapper(); final String memInfo = "MemTotal: 5830236 kB\n" + @@ -871,4 +888,67 @@ public class LibvirtMigrateCommandWrapperTest { Assert.assertTrue(replaced.contains("csdpdk-7")); Assert.assertFalse(replaced.contains("csdpdk-1")); } + + @Test + public void updateVmSharesIfNeededTestNewCpuSharesEqualCurrentSharesShouldNotUpdateVmShares() throws ParserConfigurationException, IOException, TransformerException, + SAXException { + int newVmCpuShares = 1000; + int currentVmCpuShares = 1000; + + Mockito.doReturn(newVmCpuShares).when(migrateCommandMock).getNewVmCpuShares(); + Mockito.doReturn(virtualMachineTOMock).when(migrateCommandMock).getVirtualMachine(); + Mockito.doReturn(currentVmCpuShares).when(libvirtComputingResourceMock).calculateCpuShares(virtualMachineTOMock); + + String finalXml = libvirtMigrateCmdWrapper.updateVmSharesIfNeeded(migrateCommandMock, fullfile, libvirtComputingResourceMock); + + Assert.assertEquals(finalXml, fullfile); + } + + @Test + public void updateVmSharesIfNeededTestNewCpuSharesHigherThanCurrentSharesShouldUpdateVmShares() throws ParserConfigurationException, IOException, TransformerException, + SAXException { + int newVmCpuShares = 2000; + int currentVmCpuShares = 1000; + + Mockito.doReturn(newVmCpuShares).when(migrateCommandMock).getNewVmCpuShares(); + Mockito.doReturn(virtualMachineTOMock).when(migrateCommandMock).getVirtualMachine(); + Mockito.doReturn(currentVmCpuShares).when(libvirtComputingResourceMock).calculateCpuShares(virtualMachineTOMock); + + String finalXml = libvirtMigrateCmdWrapper.updateVmSharesIfNeeded(migrateCommandMock, fullfile, libvirtComputingResourceMock); + + InputStream inputStream = IOUtils.toInputStream(finalXml, StandardCharsets.UTF_8); + DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory(); + DocumentBuilder docBuilder = docFactory.newDocumentBuilder(); + Document document = docBuilder.parse(inputStream); + + Element root = document.getDocumentElement(); + Node sharesNode = root.getElementsByTagName("shares").item(0); + int updateShares = Integer.parseInt(sharesNode.getTextContent()); + + Assert.assertEquals(updateShares, newVmCpuShares); + } + + @Test + public void updateVmSharesIfNeededTestNewCpuSharesLowerThanCurrentSharesShouldUpdateVmShares() throws ParserConfigurationException, IOException, TransformerException, + SAXException { + int 
newVmCpuShares = 500; + int currentVmCpuShares = 1000; + + Mockito.doReturn(newVmCpuShares).when(migrateCommandMock).getNewVmCpuShares(); + Mockito.doReturn(virtualMachineTOMock).when(migrateCommandMock).getVirtualMachine(); + Mockito.doReturn(currentVmCpuShares).when(libvirtComputingResourceMock).calculateCpuShares(virtualMachineTOMock); + + String finalXml = libvirtMigrateCmdWrapper.updateVmSharesIfNeeded(migrateCommandMock, fullfile, libvirtComputingResourceMock); + + InputStream inputStream = IOUtils.toInputStream(finalXml, StandardCharsets.UTF_8); + DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory(); + DocumentBuilder docBuilder = docFactory.newDocumentBuilder(); + Document document = docBuilder.parse(inputStream); + + Element root = document.getDocumentElement(); + Node sharesNode = root.getElementsByTagName("shares").item(0); + int updateShares = Integer.parseInt(sharesNode.getTextContent()); + + Assert.assertEquals(updateShares, newVmCpuShares); + } } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapperTest.java new file mode 100644 index 00000000000..5530819c2e4 --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapperTest.java @@ -0,0 +1,75 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.PrepareForMigrationAnswer; +import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.to.DpdkTO; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import java.util.HashMap; +import java.util.Map; + +@RunWith(PowerMockRunner.class) +@PrepareForTest(value = {LibvirtPrepareForMigrationCommandWrapper.class}) +public class LibvirtPrepareForMigrationCommandWrapperTest { + + @Mock + LibvirtComputingResource libvirtComputingResourceMock; + + @Mock + PrepareForMigrationCommand prepareForMigrationCommandMock; + + @Mock + VirtualMachineTO virtualMachineTOMock; + + @Spy + LibvirtPrepareForMigrationCommandWrapper libvirtPrepareForMigrationCommandWrapperSpy = new LibvirtPrepareForMigrationCommandWrapper(); + + @Test + public void createPrepareForMigrationAnswerTestDpdkInterfaceNotEmptyShouldSetParamOnAnswer() { + Map dpdkInterfaceMapping = new HashMap<>(); + dpdkInterfaceMapping.put("Interface", new DpdkTO()); + + PrepareForMigrationAnswer prepareForMigrationAnswer = libvirtPrepareForMigrationCommandWrapperSpy.createPrepareForMigrationAnswer(prepareForMigrationCommandMock, dpdkInterfaceMapping, libvirtComputingResourceMock, + virtualMachineTOMock); + + Assert.assertEquals(prepareForMigrationAnswer.getDpdkInterfaceMapping(), dpdkInterfaceMapping); + } + + @Test + public void createPrepareForMigrationAnswerTestVerifyThatCpuSharesIsSet() { + int cpuShares = 1000; + Mockito.doReturn(cpuShares).when(libvirtComputingResourceMock).calculateCpuShares(virtualMachineTOMock); + PrepareForMigrationAnswer prepareForMigrationAnswer = libvirtPrepareForMigrationCommandWrapperSpy.createPrepareForMigrationAnswer(prepareForMigrationCommandMock,null, + libvirtComputingResourceMock, virtualMachineTOMock); + + Assert.assertEquals(cpuShares, prepareForMigrationAnswer.getNewVmCpuShares().intValue()); + } +} diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtScaleVmCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtScaleVmCommandWrapperTest.java index fb963e87ed4..56f99d41abd 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtScaleVmCommandWrapperTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtScaleVmCommandWrapperTest.java @@ -207,9 +207,11 @@ public class LibvirtScaleVmCommandWrapperTest extends TestCase { @Test public void validateExecuteHandleLibvirtException() throws LibvirtException { String errorMessage = ""; + int shares = vmTo.getCpus() * vmTo.getSpeed(); Mockito.doReturn(vmTo).when(scaleVmCommandMock).getVirtualMachine(); Mockito.doReturn(libvirtUtilitiesHelperMock).when(libvirtComputingResourceMock).getLibvirtUtilitiesHelper(); + Mockito.doReturn(shares).when(libvirtComputingResourceMock).calculateCpuShares(vmTo); Mockito.doThrow(libvirtException).when(libvirtUtilitiesHelperMock).getConnectionByVmName(Mockito.anyString()); Mockito.doReturn(errorMessage).when(libvirtException).getMessage(); @@ -222,9 +224,12 @@ public class 
LibvirtScaleVmCommandWrapperTest extends TestCase { @Test public void validateExecuteSuccessfully() throws LibvirtException { + int shares = vmTo.getCpus() * vmTo.getSpeed(); + Mockito.doReturn(vmTo).when(scaleVmCommandMock).getVirtualMachine(); Mockito.doReturn(libvirtUtilitiesHelperMock).when(libvirtComputingResourceMock).getLibvirtUtilitiesHelper(); Mockito.doReturn(connectMock).when(libvirtUtilitiesHelperMock).getConnectionByVmName(Mockito.anyString()); + Mockito.doReturn(shares).when(libvirtComputingResourceMock).calculateCpuShares(vmTo); Mockito.doReturn(domainMock).when(connectMock).domainLookupByName(Mockito.anyString()); Mockito.doNothing().when(libvirtScaleVmCommandWrapperSpy).scaleMemory(Mockito.any(), Mockito.anyLong(), Mockito.anyString()); Mockito.doNothing().when(libvirtScaleVmCommandWrapperSpy).scaleVcpus(Mockito.any(), Mockito.anyInt(), Mockito.anyString()); From ab20b1220fea570a1da3c3efa4a2080cd3632070 Mon Sep 17 00:00:00 2001 From: kishankavala Date: Thu, 14 Dec 2023 13:08:56 +0530 Subject: [PATCH 21/22] KVM Ingestion - Import Instance (#7976) This PR adds new functionality to import KVM instances from an external host or from disk images in local or shared storage. Doc PR: https://github.com/apache/cloudstack-documentation/pull/356 --- .../main/java/com/cloud/vm/UserVmService.java | 4 +- .../java/com/cloud/vm/VmDetailConstants.java | 1 + .../apache/cloudstack/api/ApiConstants.java | 3 + .../admin/vm/ImportUnmanagedInstanceCmd.java | 2 +- .../api/command/admin/vm/ImportVmCmd.java | 134 ++- .../command/admin/vm/ListVmsForImportCmd.java | 134 +++ .../cloudstack/vm/UnmanagedInstanceTO.java | 20 + .../cloudstack/vm/UnmanagedVMsManager.java | 7 + .../apache/cloudstack/vm/VmImportService.java | 4 + .../cloud/agent/api/CheckVolumeAnswer.java | 40 + .../cloud/agent/api/CheckVolumeCommand.java | 59 + .../agent/api/CopyRemoteVolumeAnswer.java | 61 + .../agent/api/CopyRemoteVolumeCommand.java | 101 ++ .../cloud/agent/api/GetRemoteVmsAnswer.java | 75 ++ .../cloud/agent/api/GetRemoteVmsCommand.java | 70 ++ .../api/GetUnmanagedInstancesAnswer.java | 4 + .../service/VolumeOrchestrationService.java | 3 + .../orchestration/VolumeOrchestrator.java | 45 + .../java/com/cloud/storage/dao/VolumeDao.java | 2 + .../com/cloud/storage/dao/VolumeDaoImpl.java | 14 + .../resource/LibvirtComputingResource.java | 57 +- .../kvm/resource/LibvirtDomainXMLParser.java | 106 +- .../hypervisor/kvm/resource/LibvirtVMDef.java | 18 +- .../LibvirtCheckVolumeCommandWrapper.java | 86 ++ ...LibvirtCopyRemoteVolumeCommandWrapper.java | 93 ++ .../LibvirtGetRemoteVmsCommandWrapper.java | 194 ++++ ...rtGetUnmanagedInstancesCommandWrapper.java | 227 ++++ ...epareUnmanageVMInstanceCommandWrapper.java | 51 + .../com/cloud/api/query/QueryManagerImpl.java | 31 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 22 +- .../vm/UnmanagedVMsManagerImpl.java | 1010 ++++++++++++++--- .../vm/UnmanagedVMsManagerImplTest.java | 141 ++- tools/apidoc/gen_toc.py | 4 +- ui/public/locales/en.json | 27 +- ui/src/config/section/compute.js | 2 +- .../wizard/ComputeOfferingSelection.vue | 2 +- .../compute/wizard/MultiDiskSelection.vue | 6 +- .../views/tools/ImportUnmanagedInstance.vue | 270 ++++- ui/src/views/tools/ManageInstances.vue | 747 +++++++++--- .../java/com/cloud/utils/ssh/SshHelper.java | 21 +- 40 files changed, 3504 insertions(+), 394 deletions(-) create mode 100644 api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java create mode 100644 core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java 
create mode 100644 core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java create mode 100644 core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java create mode 100644 core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java create mode 100644 core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java create mode 100644 core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index d58b75b0dca..c32c099ed3a 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.vm; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -518,7 +519,8 @@ public interface UserVmService { UserVm importVM(final DataCenter zone, final Host host, final VirtualMachineTemplate template, final String instanceName, final String displayName, final Account owner, final String userData, final Account caller, final Boolean isDisplayVm, final String keyboard, final long accountId, final long userId, final ServiceOffering serviceOffering, final String sshPublicKey, - final String hostName, final HypervisorType hypervisorType, final Map customParameters, final VirtualMachine.PowerState powerState) throws InsufficientCapacityException; + final String hostName, final HypervisorType hypervisorType, final Map customParameters, + final VirtualMachine.PowerState powerState, final LinkedHashMap> networkNicMap) throws InsufficientCapacityException; /** * Unmanage a guest VM from CloudStack diff --git a/api/src/main/java/com/cloud/vm/VmDetailConstants.java b/api/src/main/java/com/cloud/vm/VmDetailConstants.java index 124d9d50d5b..9338cc11cd4 100644 --- a/api/src/main/java/com/cloud/vm/VmDetailConstants.java +++ b/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -39,6 +39,7 @@ public interface VmDetailConstants { // KVM specific (internal) String KVM_VNC_PORT = "kvm.vnc.port"; String KVM_VNC_ADDRESS = "kvm.vnc.address"; + String KVM_VNC_PASSWORD = "kvm.vnc.password"; // KVM specific, custom virtual GPU hardware String VIDEO_HARDWARE = "video.hardware"; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 6151b6f5945..d7767721667 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -212,6 +212,7 @@ public class ApiConstants { public static final String HOST_IDS = "hostids"; public static final String HOST_IP = "hostip"; public static final String HOST_NAME = "hostname"; + public static final String HOST = "host"; public static final String 
HOST_CONTROL_STATE = "hostcontrolstate"; public static final String HOSTS_MAP = "hostsmap"; public static final String HYPERVISOR = "hypervisor"; @@ -1064,7 +1065,9 @@ public class ApiConstants { public static final String SOURCE_NAT_IP = "sourcenatipaddress"; public static final String SOURCE_NAT_IP_ID = "sourcenatipaddressid"; public static final String HAS_RULES = "hasrules"; + public static final String DISK_PATH = "diskpath"; public static final String IMPORT_SOURCE = "importsource"; + public static final String TEMP_PATH = "temppath"; public static final String OBJECT_STORAGE = "objectstore"; public static final String HEURISTIC_RULE = "heuristicrule"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java index 532a3f0d392..d632c786a16 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java @@ -84,7 +84,7 @@ public class ImportUnmanagedInstanceCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, - description = "the hypervisor name of the instance") + description = "the name of the instance as it is known to the hypervisor") private String name; @Parameter(name = ApiConstants.DISPLAY_NAME, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java index 01f517fb837..e8b9f3addde 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java @@ -31,13 +31,18 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VmwareDatacenterResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.vm.VmImportService; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; +import javax.inject.Inject; + @APICommand(name = "importVm", description = "Import virtual machine from an unmanaged host into CloudStack", responseObject = UserVmResponse.class, @@ -47,21 +52,72 @@ import org.apache.log4j.Logger; authorized = {RoleType.Admin}, since = "4.19.0") public class ImportVmCmd extends ImportUnmanagedInstanceCmd { public static final Logger LOGGER = Logger.getLogger(ImportVmCmd.class); + @Inject + public VmImportService vmImportService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + required = true, + description = "the zone ID") + private Long zoneId; + + @Parameter(name = ApiConstants.USERNAME, + type = CommandType.STRING, + description = "the username for the host") + private String username; + + @Parameter(name = ApiConstants.PASSWORD,
type = CommandType.STRING, + description = "the password for the host") + private String password; + + @Parameter(name = ApiConstants.HOST, + type = CommandType.STRING, + description = "the host name or IP address") + private String host; + @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, required = true, description = "hypervisor type of the host") private String hypervisor; + @Parameter(name = ApiConstants.DISK_PATH, + type = CommandType.STRING, + description = "path of the disk image") + private String diskPath; + @Parameter(name = ApiConstants.IMPORT_SOURCE, type = CommandType.STRING, required = true, description = "Source location for Import" ) private String importSource; + @Parameter(name = ApiConstants.NETWORK_ID, + type = CommandType.UUID, + entityType = NetworkResponse.class, + description = "the network ID") + private Long networkId; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "Host where local disk is located") + private Long hostId; + + @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "Shared storage pool where disk is located") + private Long storagePoolId; + + @Parameter(name = ApiConstants.TEMP_PATH, + type = CommandType.STRING, + description = "Temp Path on external host for disk image copy" ) + private String tmpPath; + // Import from Vmware to KVM migration parameters @Parameter(name = ApiConstants.EXISTING_VCENTER_ID, @@ -73,7 +129,7 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { @Parameter(name = ApiConstants.HOST_IP, type = BaseCmd.CommandType.STRING, description = "(only for importing migrated VMs from Vmware to KVM) VMware ESXi host IP/Name.") - private String host; + private String hostip; @Parameter(name = ApiConstants.VCENTER, type = CommandType.STRING, @@ -88,14 +144,6 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { description = "(only for importing migrated VMs from Vmware to KVM) Name of VMware cluster.") private String clusterName; - @Parameter(name = ApiConstants.USERNAME, type = CommandType.STRING, - description = "(only for importing migrated VMs from Vmware to KVM) The Username required to connect to resource.") - private String username; - - @Parameter(name = ApiConstants.PASSWORD, type = CommandType.STRING, - description = "(only for importing migrated VMs from Vmware to KVM) The password for the specified username.") - private String password; - @Parameter(name = ApiConstants.CONVERT_INSTANCE_HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "(only for importing migrated VMs from Vmware to KVM) optional - the host to perform the virt-v2v migration from VMware to KVM.") private Long convertInstanceHostId; @@ -104,30 +152,20 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { description = "(only for importing migrated VMs from Vmware to KVM) optional - the temporary storage pool to perform the virt-v2v migration from VMware to KVM.") private Long convertStoragePoolId; - @Override - public String getEventType() { - return EventTypes.EVENT_VM_IMPORT; - } + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// - @Override - public String getEventDescription() { - String vmName = getName(); - if (ObjectUtils.anyNotNull(vcenter, existingVcenterId)) { - String msg = StringUtils.isNotBlank(vcenter) ? 
- String.format("external vCenter: %s - datacenter: %s", vcenter, datacenterName) : - String.format("existing vCenter Datacenter with ID: %s", existingVcenterId); - return String.format("Importing unmanaged VM: %s from %s - VM: %s", getDisplayName(), msg, vmName); - } - return String.format("Importing unmanaged VM: %s", vmName); + public Long getZoneId() { + return zoneId; } - public Long getExistingVcenterId() { return existingVcenterId; } - public String getHost() { - return host; + public String getHostIp() { + return hostip; } public String getVcenter() { @@ -150,6 +188,10 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { return password; } + public String getHost() { + return host; + } + public Long getConvertInstanceHostId() { return convertInstanceHostId; } @@ -162,10 +204,47 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { return hypervisor; } + public String getDiskPath() { + return diskPath; + } + public String getImportSource() { return importSource; } + public Long getHostId() { + return hostId; + } + + public Long getStoragePoolId() { + return storagePoolId; + } + + public String getTmpPath() { + return tmpPath; + } + + public Long getNetworkId() { + return networkId; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_VM_IMPORT; + } + + @Override + public String getEventDescription() { + String vmName = getName(); + if (ObjectUtils.anyNotNull(vcenter, existingVcenterId)) { + String msg = StringUtils.isNotBlank(vcenter) ? + String.format("external vCenter: %s - datacenter: %s", vcenter, datacenterName) : + String.format("existing vCenter Datacenter with ID: %s", existingVcenterId); + return String.format("Importing unmanaged VM: %s from %s - VM: %s", getDisplayName(), msg, vmName); + } + return String.format("Importing unmanaged VM: %s", vmName); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -176,5 +255,4 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { response.setResponseName(getCommandName()); setResponseObject(response); } - } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java new file mode 100644 index 00000000000..88df04d9ef5 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
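+// Usage sketch for the two admin APIs added in this patch (illustrative only, with a
+// CloudMonkey-style client; every ID, address and credential below is a placeholder, and
+// the importsource values follow the source types named in the commit message, i.e. an
+// external host or disk images in local or shared storage):
+//
+//   list vmsforimport zoneid=<zone-uuid> host=10.0.0.5 username=root password=<secret> hypervisor=KVM
+//   import vm name=<vm-name> zoneid=<zone-uuid> host=10.0.0.5 username=root password=<secret>
+//       hypervisor=KVM importsource=external serviceofferingid=<offering-uuid>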
+ +package org.apache.cloudstack.api.command.admin.vm; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.UnmanagedInstanceResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.vm.UnmanagedInstanceTO; +import org.apache.cloudstack.vm.VmImportService; +import org.apache.log4j.Logger; + +import javax.inject.Inject; + +@APICommand(name = "listVmsForImport", + description = "Lists virtual machines on an unmanaged host", + responseObject = UnmanagedInstanceResponse.class, + responseView = ResponseObject.ResponseView.Full, + entityType = {UnmanagedInstanceTO.class}, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized = {RoleType.Admin}, + since = "4.19.0") +public class ListVmsForImportCmd extends BaseListCmd { + public static final Logger LOGGER = Logger.getLogger(ListVmsForImportCmd.class.getName()); + + @Inject + public VmImportService vmImportService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + required = true, + description = "the zone ID") + private Long zoneId; + + @Parameter(name = ApiConstants.USERNAME, + type = CommandType.STRING, + description = "the username for the host") + private String username; + + @Parameter(name = ApiConstants.PASSWORD, + type = CommandType.STRING, + description = "the password for the host") + private String password; + + @Parameter(name = ApiConstants.HOST, + type = CommandType.STRING, + required = true, + description = "the host name or IP address") + private String host; + + @Parameter(name = ApiConstants.HYPERVISOR, + type = CommandType.STRING, + required = true, + description = "hypervisor type of the host") + private String hypervisor; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getZoneId() { + return zoneId; + } + + public String getUsername() { + return username; + } + + public String getPassword() { + return password; + } + + public String getHost() { + return host; + } + + public String getHypervisor() { + return hypervisor; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + ListResponse<UnmanagedInstanceResponse> response =
vmImportService.listVmsForImport(this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + Account account = CallContext.current().getCallingAccount(); + if (account != null) { + return account.getId(); + } + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java index a4748155b76..23e0e371714 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java @@ -55,6 +55,8 @@ public class UnmanagedInstanceTO { private List nics; + private String vncPassword; + public String getName() { return name; } @@ -167,6 +169,14 @@ public class UnmanagedInstanceTO { this.nics = nics; } + public String getVncPassword() { + return vncPassword; + } + + public void setVncPassword(String vncPassword) { + this.vncPassword = vncPassword; + } + public static class Disk { private String diskId; @@ -192,6 +202,8 @@ public class UnmanagedInstanceTO { private String datastorePath; + private int datastorePort; + private String datastoreType; public String getDiskId() { @@ -297,6 +309,14 @@ public class UnmanagedInstanceTO { public void setDatastoreType(String datastoreType) { this.datastoreType = datastoreType; } + + public void setDatastorePort(int datastorePort) { + this.datastorePort = datastorePort; + } + + public int getDatastorePort() { + return datastorePort; + } } public static class Nic { diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java index 2876a0127be..53aece94964 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java @@ -17,13 +17,20 @@ package org.apache.cloudstack.vm; +import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.component.PluggableService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import static com.cloud.hypervisor.Hypervisor.HypervisorType.KVM; +import static com.cloud.hypervisor.Hypervisor.HypervisorType.VMware; public interface UnmanagedVMsManager extends VmImportService, UnmanageVMService, PluggableService, Configurable { ConfigKey UnmanageVMPreserveNic = new ConfigKey<>("Advanced", Boolean.class, "unmanage.vm.preserve.nics", "false", "If set to true, do not remove VM nics (and its MAC addresses) when unmanaging a VM, leaving them allocated but not reserved. 
" + "If set to false, nics are removed and MAC addresses can be reassigned", true, ConfigKey.Scope.Zone); + + static boolean isSupported(Hypervisor.HypervisorType hypervisorType) { + return hypervisorType == VMware || hypervisorType == KVM; + } } diff --git a/api/src/main/java/org/apache/cloudstack/vm/VmImportService.java b/api/src/main/java/org/apache/cloudstack/vm/VmImportService.java index e5b121cd2d6..04ef248fb8a 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/VmImportService.java +++ b/api/src/main/java/org/apache/cloudstack/vm/VmImportService.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.vm; import org.apache.cloudstack.api.command.admin.vm.ImportUnmanagedInstanceCmd; import org.apache.cloudstack.api.command.admin.vm.ImportVmCmd; import org.apache.cloudstack.api.command.admin.vm.ListUnmanagedInstancesCmd; +import org.apache.cloudstack.api.command.admin.vm.ListVmsForImportCmd; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UnmanagedInstanceResponse; import org.apache.cloudstack.api.response.UserVmResponse; @@ -37,5 +38,8 @@ public interface VmImportService { ListResponse listUnmanagedInstances(ListUnmanagedInstancesCmd cmd); UserVmResponse importUnmanagedInstance(ImportUnmanagedInstanceCmd cmd); + UserVmResponse importVm(ImportVmCmd cmd); + + ListResponse listVmsForImport(ListVmsForImportCmd cmd); } diff --git a/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java b/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java new file mode 100644 index 00000000000..dd136d8642f --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.agent.api; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class CheckVolumeAnswer extends Answer { + + private long size; + + CheckVolumeAnswer() { + } + + public CheckVolumeAnswer(CheckVolumeCommand cmd, String details, long size) { + super(cmd, true, details); + this.size = size; + } + + public long getSize() { + return size; + } + + public String getString() { + return "CheckVolumeAnswer [size=" + size + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java new file mode 100644 index 00000000000..b4036bebf3a --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java @@ -0,0 +1,59 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +import com.cloud.agent.api.to.StorageFilerTO; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class CheckVolumeCommand extends Command { + + String srcFile; + + StorageFilerTO storageFilerTO; + + + public String getSrcFile() { + return srcFile; + } + + public void setSrcFile(String srcFile) { + this.srcFile = srcFile; + } + + public CheckVolumeCommand() { + } + + @Override + public boolean executeInSequence() { + return false; + } + + public String getString() { + return "CheckVolumeCommand [srcFile=" + srcFile + "]"; + } + + public StorageFilerTO getStorageFilerTO() { + return storageFilerTO; + } + + public void setStorageFilerTO(StorageFilerTO storageFilerTO) { + this.storageFilerTO = storageFilerTO; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java new file mode 100644 index 00000000000..f6d7cab4596 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
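+// Caller-side sketch for the CheckVolumeCommand/CheckVolumeAnswer pair defined in this
+// patch (illustrative, assuming an injected AgentManager field agentMgr, a reachable host
+// id and a StoragePool from the calling context; easySend() returns null when the agent
+// cannot be reached):
+//
+//   CheckVolumeCommand cmd = new CheckVolumeCommand();
+//   cmd.setSrcFile(volumePath);
+//   cmd.setStorageFilerTO(new StorageFilerTO(pool));
+//   CheckVolumeAnswer answer = (CheckVolumeAnswer) agentMgr.easySend(hostId, cmd);
+//   long virtualSize = (answer != null && answer.getResult()) ? answer.getSize() : 0L;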
+ +package com.cloud.agent.api; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class CopyRemoteVolumeAnswer extends Answer { + + private String remoteIp; + private String filename; + + private long size; + + CopyRemoteVolumeAnswer() { + } + + public CopyRemoteVolumeAnswer(CopyRemoteVolumeCommand cmd, String details, String filename, long size) { + super(cmd, true, details); + this.remoteIp = cmd.getRemoteIp(); + this.filename = filename; + this.size = size; + } + + public String getRemoteIp() { + return remoteIp; + } + + public void setRemoteIp(String remoteIp) { + this.remoteIp = remoteIp; + } + + public void setFilename(String filename) { + this.filename = filename; + } + + public String getFilename() { + return filename; + } + + public long getSize() { + return size; + } + + public String getString() { + return "CopyRemoteVolumeAnswer [remoteIp=" + remoteIp + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java new file mode 100644 index 00000000000..82bc4d7cb45 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java @@ -0,0 +1,101 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
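+// Flow note for CopyRemoteVolumeCommand below: tmpPath names a scratch directory on the
+// remote host. As implemented later in this patch (LibvirtComputingResource.copyVolume and
+// LibvirtCopyRemoteVolumeCommandWrapper), the agent runs "qemu-img convert -O qcow2" over
+// SSH to write a UUID-named qcow2 file under tmpPath on the remote host, then copies that
+// file via scp into the local path of the destination primary storage pool.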
+// + +package com.cloud.agent.api; + +import com.cloud.agent.api.to.StorageFilerTO; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class CopyRemoteVolumeCommand extends Command { + + String remoteIp; + String username; + String password; + String srcFile; + + String tmpPath; + + StorageFilerTO storageFilerTO; + + public CopyRemoteVolumeCommand(String remoteIp, String username, String password) { + this.remoteIp = remoteIp; + this.username = username; + this.password = password; + } + + public String getRemoteIp() { + return remoteIp; + } + + public void setRemoteIp(String remoteIp) { + this.remoteIp = remoteIp; + } + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public String getSrcFile() { + return srcFile; + } + + public void setSrcFile(String srcFile) { + this.srcFile = srcFile; + } + + public CopyRemoteVolumeCommand() { + } + + @Override + public boolean executeInSequence() { + return false; + } + + public String getString() { + return "CopyRemoteVolumeCommand [remoteIp=" + remoteIp + "]"; + } + + public void setTempPath(String tmpPath) { + this.tmpPath = tmpPath; + } + + public String getTmpPath() { + return tmpPath; + } + + public StorageFilerTO getStorageFilerTO() { + return storageFilerTO; + } + + public void setStorageFilerTO(StorageFilerTO storageFilerTO) { + this.storageFilerTO = storageFilerTO; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java new file mode 100644 index 00000000000..8cd072f1da1 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.agent.api; + +import org.apache.cloudstack.vm.UnmanagedInstanceTO; + +import java.util.HashMap; +import java.util.List; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class GetRemoteVmsAnswer extends Answer { + + private String remoteIp; + private HashMap unmanagedInstances; + + List vmNames; + + GetRemoteVmsAnswer() { + } + + public GetRemoteVmsAnswer(GetRemoteVmsCommand cmd, String details, HashMap unmanagedInstances) { + super(cmd, true, details); + this.remoteIp = cmd.getRemoteIp(); + this.unmanagedInstances = unmanagedInstances; + } + + public GetRemoteVmsAnswer(GetRemoteVmsCommand cmd, String details, List vmNames) { + super(cmd, true, details); + this.remoteIp = cmd.getRemoteIp(); + this.vmNames = vmNames; + } + + public String getRemoteIp() { + return remoteIp; + } + + public void setRemoteIp(String remoteIp) { + this.remoteIp = remoteIp; + } + + public HashMap getUnmanagedInstances() { + return unmanagedInstances; + } + + public void setUnmanagedInstances(HashMap unmanagedInstances) { + this.unmanagedInstances = unmanagedInstances; + } + + public List getVmNames() { + return vmNames; + } + + public void setVmNames(List vmNames) { + this.vmNames = vmNames; + } + + public String getString() { + return "GetRemoteVmsAnswer [remoteIp=" + remoteIp + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java new file mode 100644 index 00000000000..5c71d12dbd0 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java @@ -0,0 +1,70 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
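+// Round-trip sketch for the GetRemoteVmsCommand/GetRemoteVmsAnswer pair (illustrative,
+// assuming an injected AgentManager field agentMgr and the id of a KVM host that can reach
+// the remote libvirt daemon); the answer carries the stopped instances keyed by VM name:
+//
+//   GetRemoteVmsCommand cmd = new GetRemoteVmsCommand(remoteIp, username, password);
+//   GetRemoteVmsAnswer answer = (GetRemoteVmsAnswer) agentMgr.easySend(hostId, cmd);
+//   HashMap<String, UnmanagedInstanceTO> vms = answer.getUnmanagedInstances();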
+// + +package com.cloud.agent.api; + +@LogLevel(LogLevel.Log4jLevel.Trace) +public class GetRemoteVmsCommand extends Command { + + String remoteIp; + String username; + String password; + + public GetRemoteVmsCommand(String remoteIp, String username, String password) { + this.remoteIp = remoteIp; + this.username = username; + this.password = password; + } + + public String getRemoteIp() { + return remoteIp; + } + + public void setRemoteIp(String remoteIp) { + this.remoteIp = remoteIp; + } + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public GetRemoteVmsCommand() { + } + + @Override + public boolean executeInSequence() { + return false; + } + + public String getString() { + return "GetRemoteVmsCommand [remoteIp=" + remoteIp + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java b/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java index 3c6118d426e..771d472be2a 100644 --- a/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java @@ -30,6 +30,10 @@ public class GetUnmanagedInstancesAnswer extends Answer { GetUnmanagedInstancesAnswer() { } + public GetUnmanagedInstancesAnswer(GetUnmanagedInstancesCommand cmd, String details) { + super(cmd, false, details); + } + public GetUnmanagedInstancesAnswer(GetUnmanagedInstancesCommand cmd, String details, HashMap unmanagedInstances) { super(cmd, true, details); this.instanceName = cmd.getInstanceName(); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 15f5b231be2..01123401fac 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -168,6 +168,9 @@ public interface VolumeOrchestrationService { DiskProfile importVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner, Long deviceId, Long poolId, String path, String chainInfo); + DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachine vm, VirtualMachineTemplate template, + Long deviceId, Long poolId, String path, String chainInfo, DiskProfile diskProfile); + /** * Unmanage VM volumes */ diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 6f945479bd4..b8f3e5a10e5 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -2224,6 +2224,51 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati return toDiskProfile(vol, offering); } + @Override + public DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachine vm, VirtualMachineTemplate template, + Long 
deviceId, Long poolId, String path, String chainInfo, DiskProfile diskProfile) { + + VolumeVO vol = _volsDao.findById(diskProfile.getVolumeId()); + if (vm != null) { + vol.setInstanceId(vm.getId()); + } + + if (deviceId != null) { + vol.setDeviceId(deviceId); + } else if (type.equals(Type.ROOT)) { + vol.setDeviceId(0l); + } else { + vol.setDeviceId(1l); + } + + if (template != null) { + if (ImageFormat.ISO.equals(template.getFormat())) { + vol.setIsoId(template.getId()); + } else if (Storage.TemplateType.DATADISK.equals(template.getTemplateType())) { + vol.setTemplateId(template.getId()); + } + if (type == Type.ROOT) { + vol.setTemplateId(template.getId()); + } + } + + // display flag matters only for the User vms + if (VirtualMachine.Type.User.equals(vm.getType())) { + UserVmVO userVm = _userVmDao.findById(vm.getId()); + vol.setDisplayVolume(userVm.isDisplayVm()); + } + + vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType())); + vol.setPoolId(poolId); + vol.setPath(path); + vol.setChainInfo(chainInfo); + vol.setSize(diskProfile.getSize()); + vol.setState(Volume.State.Ready); + vol.setAttached(new Date()); + _volsDao.update(vol.getId(), vol); + return toDiskProfile(vol, offering); + } + @Override public void unmanageVolumes(long vmId) { if (s_logger.isDebugEnabled()) { diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java index 79899b7119e..be6588e3189 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java @@ -152,5 +152,7 @@ public interface VolumeDao extends GenericDao, StateDao listByPoolIdAndPaths(long id, List pathList); + VolumeVO findByPoolIdAndPath(long id, String path); + List listByIds(List ids); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index 056b7206d72..bf556622463 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -71,6 +71,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol protected GenericSearchBuilder primaryStorageSearch; protected GenericSearchBuilder primaryStorageSearch2; protected GenericSearchBuilder secondaryStorageSearch; + private final SearchBuilder poolAndPathSearch; @Inject ResourceTagDao _tagsDao; @@ -487,6 +488,11 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol volumeIdSearch.and("idIN", volumeIdSearch.entity().getId(), Op.IN); volumeIdSearch.done(); + poolAndPathSearch = createSearchBuilder(); + poolAndPathSearch.and("poolId", poolAndPathSearch.entity().getPoolId(), Op.EQ); + poolAndPathSearch.and("path", poolAndPathSearch.entity().getPath(), Op.EQ); + poolAndPathSearch.done(); + } @Override @@ -802,6 +808,14 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol return listBy(sc); } + @Override + public VolumeVO findByPoolIdAndPath(long id, String path) { + SearchCriteria sc = poolAndPathSearch.create(); + sc.setParameters("poolId", id); + sc.setParameters("path", path); + return findOneBy(sc); + } + @Override public List listByIds(List ids) { if (CollectionUtils.isEmpty(ids)) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index c8cd9854c57..60e6bcffeb6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -2295,7 +2295,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return new Pair<Map<String, Integer>, Integer>(macAddressToNicNum, devNum); } - protected PowerState convertToPowerState(final DomainState ps) { + public PowerState convertToPowerState(final DomainState ps) { final PowerState state = POWER_STATES_TABLE.get(ps); return state == null ? PowerState.PowerUnknown : state; } @@ -3777,7 +3777,39 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - protected List<String> getAllVmNames(final Connect conn) { + /** + * Given a disk path on KVM host, attempts to find source host and path using mount command + * @param diskPath KVM host path for virtual disk + * @return Pair with IP of host and path + */ + public Pair<String, String> getSourceHostPath(String diskPath) { + String sourceHostIp = null; + String sourcePath = null; + try { + String mountResult = Script.runSimpleBashScript("mount | grep \"" + diskPath + "\""); + s_logger.debug("Got mount result for " + diskPath + "\n\n" + mountResult); + if (StringUtils.isNotEmpty(mountResult)) { + String[] res = mountResult.strip().split(" "); + if (res[0].contains(":")) { + res = res[0].split(":"); + sourceHostIp = res[0].strip(); + sourcePath = res[1].strip(); + } else { + // Assume local storage + sourceHostIp = getPrivateIp(); + sourcePath = diskPath; + } + } + if (StringUtils.isNotEmpty(sourceHostIp) && StringUtils.isNotEmpty(sourcePath)) { + return new Pair<>(sourceHostIp, sourcePath); + } + } catch (Exception ex) { + s_logger.warn("Failed to list source host and IP for " + diskPath + ": " + ex.toString()); + } + return null; + } + + public List<String> getAllVmNames(final Connect conn) { final ArrayList<String> la = new ArrayList<String>(); try { final String names[] = conn.listDefinedDomains(); @@ -5339,4 +5371,25 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } } + + /* + Scp volume from remote host to local directory + */ + public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath) { + try { + String outputFile = UUID.randomUUID().toString(); + StringBuilder command = new StringBuilder("qemu-img convert -O qcow2 "); + command.append(remoteFile); + command.append(" "+tmpPath); + command.append(outputFile); + s_logger.debug("Converting remoteFile: "+remoteFile); + SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString()); + s_logger.debug("Copying remoteFile to: "+localDir); + SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath+outputFile); + s_logger.debug("Successfully copied remoteFile to: "+localDir+"/"+outputFile); + return outputFile; + } catch (Exception e) { + throw new RuntimeException(e); + } + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java index a5565c2de34..f165796adef 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java +++
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java @@ -57,8 +57,10 @@ public class LibvirtDomainXMLParser { private final List<ChannelDef> channels = new ArrayList<ChannelDef>(); private final List<WatchDogDef> watchDogDefs = new ArrayList<WatchDogDef>(); private Integer vncPort; + private String vncPasswd; private String desc; - + private LibvirtVMDef.CpuTuneDef cpuTuneDef; + private LibvirtVMDef.CpuModeDef cpuModeDef; private String name; public boolean parseDomainXML(String domXML) { @@ -278,6 +280,14 @@ public class LibvirtDomainXMLParser { String name = getAttrValue("target", "name", channel); String state = getAttrValue("target", "state", channel); + if (ChannelDef.ChannelType.valueOf(type.toUpperCase()).equals(ChannelDef.ChannelType.SPICEVMC)) { + continue; + } + + if (path == null) { + path = ""; + } + ChannelDef def = null; if (StringUtils.isBlank(state)) { def = new ChannelDef(name, ChannelDef.ChannelType.valueOf(type.toUpperCase()), new File(path)); @@ -305,6 +315,12 @@ public class LibvirtDomainXMLParser { vncPort = null; } } + + String passwd = graphic.getAttribute("passwd"); + if (passwd != null) { + vncPasswd = passwd; + } + } NodeList rngs = devices.getElementsByTagName("rng"); @@ -317,6 +333,18 @@ public class LibvirtDomainXMLParser { String period = getAttrValue("rate", "period", rng); if (StringUtils.isAnyEmpty(bytes, period)) { s_logger.debug(String.format("Bytes and period in the rng section should not be null, please check the VM %s", name)); + } + + if (bytes == null) { + bytes = "0"; + } + + if (period == null) { + period = "0"; + } + + if (StringUtils.isEmpty(backendModel)) { + def = new RngDef(path, Integer.parseInt(bytes), Integer.parseInt(period)); } else { if (StringUtils.isEmpty(backendModel)) { def = new RngDef(path, Integer.parseInt(bytes), Integer.parseInt(period)); @@ -350,7 +378,8 @@ public class LibvirtDomainXMLParser { watchDogDefs.add(def); } - + extractCpuTuneDef(rootElement); + extractCpuModeDef(rootElement); return true; } catch (ParserConfigurationException e) { s_logger.debug(e.toString()); @@ -411,6 +440,10 @@ public class LibvirtDomainXMLParser { return interfaces; } + public String getVncPasswd() { + return vncPasswd; + } + public MemBalloonDef getMemBalloon() { return memBalloonDef; } @@ -438,4 +471,65 @@ public class LibvirtDomainXMLParser { public String getName() { return name; } + + public LibvirtVMDef.CpuTuneDef getCpuTuneDef() { + return cpuTuneDef; + } + + public LibvirtVMDef.CpuModeDef getCpuModeDef() { + return cpuModeDef; + } + + private void extractCpuTuneDef(final Element rootElement) { + NodeList cpuTunesList = rootElement.getElementsByTagName("cputune"); + if (cpuTunesList.getLength() > 0) { + cpuTuneDef = new LibvirtVMDef.CpuTuneDef(); + final Element cpuTuneDefElement = (Element) cpuTunesList.item(0); + final String cpuShares = getTagValue("shares", cpuTuneDefElement); + if (StringUtils.isNotBlank(cpuShares)) { + cpuTuneDef.setShares((Integer.parseInt(cpuShares))); + } + + final String quota = getTagValue("quota", cpuTuneDefElement); + if (StringUtils.isNotBlank(quota)) { + cpuTuneDef.setQuota((Integer.parseInt(quota))); + } + + final String period = getTagValue("period", cpuTuneDefElement); + if (StringUtils.isNotBlank(period)) { + cpuTuneDef.setPeriod((Integer.parseInt(period))); + } + } + } + + private void extractCpuModeDef(final Element rootElement){ + NodeList cpuModeList = rootElement.getElementsByTagName("cpu"); + if
(cpuModeList.getLength() > 0){ + cpuModeDef = new LibvirtVMDef.CpuModeDef(); + final Element cpuModeDefElement = (Element) cpuModeList.item(0); + final String cpuModel = getTagValue("model", cpuModeDefElement); + if (StringUtils.isNotBlank(cpuModel)){ + cpuModeDef.setModel(cpuModel); + } + NodeList cpuFeatures = cpuModeDefElement.getElementsByTagName("features"); + if (cpuFeatures.getLength() > 0) { + final ArrayList features = new ArrayList<>(cpuFeatures.getLength()); + for (int i = 0; i < cpuFeatures.getLength(); i++) { + final Element feature = (Element)cpuFeatures.item(i); + final String policy = feature.getAttribute("policy"); + String featureName = feature.getAttribute("name"); + if ("disable".equals(policy)) { + featureName = "-" + featureName; + } + features.add(featureName); + } + cpuModeDef.setFeatures(features); + } + final String sockets = getAttrValue("topology", "sockets", cpuModeDefElement); + final String cores = getAttrValue("topology", "cores", cpuModeDefElement); + if (StringUtils.isNotBlank(sockets) && StringUtils.isNotBlank(cores)) { + cpuModeDef.setTopology(Integer.parseInt(cores), Integer.parseInt(sockets)); + } + } + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index d31a6ab38db..6b5fac0e942 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -1072,6 +1072,18 @@ public class LibvirtVMDef { public LibvirtDiskEncryptDetails getLibvirtDiskEncryptDetails() { return this.encryptDetails; } + public String getSourceHost() { + return _sourceHost; + } + + public int getSourceHostPort() { + return _sourcePort; + } + + public String getSourcePath() { + return _sourcePath; + } + @Override public String toString() { StringBuilder diskBuilder = new StringBuilder(); @@ -1737,6 +1749,10 @@ public class LibvirtVMDef { modeBuilder.append(""); return modeBuilder.toString(); } + + public int getCoresPerSocket() { + return _coresPerSocket; + } } public static class SerialDef { @@ -1793,7 +1809,7 @@ public class LibvirtVMDef { public final static class ChannelDef { enum ChannelType { - UNIX("unix"), SERIAL("serial"); + UNIX("unix"), SERIAL("serial"), SPICEVMC("spicevmc"); String type; ChannelType(String type) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java new file mode 100644 index 00000000000..8b0a5aab461 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java @@ -0,0 +1,86 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckVolumeAnswer; +import com.cloud.agent.api.CheckVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.storage.Storage; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.log4j.Logger; +import org.libvirt.LibvirtException; + +import java.util.Map; + +@ResourceWrapper(handles = CheckVolumeCommand.class) +public final class LibvirtCheckVolumeCommandWrapper extends CommandWrapper { + + private static final Logger s_logger = Logger.getLogger(LibvirtCheckVolumeCommandWrapper.class); + + @Override + public Answer execute(final CheckVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { + String result = null; + String srcFile = command.getSrcFile(); + StorageFilerTO storageFilerTO = command.getStorageFilerTO(); + KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr(); + KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid()); + + try { + if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem || + storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) { + final KVMPhysicalDisk vol = pool.getPhysicalDisk(srcFile); + final String path = vol.getPath(); + long size = getVirtualSizeFromFile(path); + return new CheckVolumeAnswer(command, "", size); + } else { + return new Answer(command, false, "Unsupported Storage Pool"); + } + + } catch (final Exception e) { + s_logger.error("Error while locating disk: "+ e.getMessage()); + return new Answer(command, false, result); + } + } + + private long getVirtualSizeFromFile(String path) { + try { + QemuImg qemu = new QemuImg(0); + QemuImgFile qemuFile = new QemuImgFile(path); + Map info = qemu.info(qemuFile); + if (info.containsKey(QemuImg.VIRTUAL_SIZE)) { + return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE)); + } else { + throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path); + } + } catch (QemuImgException | LibvirtException ex) { + throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java new file mode 100644 index 00000000000..e48edd8eec0 --- /dev/null +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java @@ -0,0 +1,93 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CopyRemoteVolumeAnswer; +import com.cloud.agent.api.CopyRemoteVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.storage.Storage; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.log4j.Logger; +import org.libvirt.LibvirtException; + +import java.util.Map; + +@ResourceWrapper(handles = CopyRemoteVolumeCommand.class) +public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper { + + private static final Logger s_logger = Logger.getLogger(LibvirtCopyRemoteVolumeCommandWrapper.class); + + @Override + public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { + String result = null; + String srcIp = command.getRemoteIp(); + String username = command.getUsername(); + String password = command.getPassword(); + String srcFile = command.getSrcFile(); + StorageFilerTO storageFilerTO = command.getStorageFilerTO(); + String tmpPath = command.getTmpPath(); + KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr(); + KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid()); + String dstPath = pool.getLocalPath(); + + try { + if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem || + storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) { + String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath); + s_logger.debug("Volume Copy Successful"); + final KVMPhysicalDisk vol = pool.getPhysicalDisk(filename); + final String path = vol.getPath(); + long size = getVirtualSizeFromFile(path); + return new CopyRemoteVolumeAnswer(command, "", filename, size); + } else { + return new Answer(command, false, "Unsupported Storage Pool"); + } + + } catch (final Exception e) { + s_logger.error("Error while copying file from remote host: "+ e.getMessage()); + return new Answer(command, false, result); + 
} + } + + private long getVirtualSizeFromFile(String path) { + try { + QemuImg qemu = new QemuImg(0); + QemuImgFile qemuFile = new QemuImgFile(path); + Map info = qemu.info(qemuFile); + if (info.containsKey(QemuImg.VIRTUAL_SIZE)) { + return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE)); + } else { + throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path); + } + } catch (QemuImgException | LibvirtException ex) { + throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java new file mode 100644 index 00000000000..700f058b59b --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java @@ -0,0 +1,194 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
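+// Connectivity note for the wrapper below: it opens the libvirt connection with a
+// "qemu+tcp://<remote-ip>/system" URI, so libvirtd on the remote host must accept
+// unencrypted TCP connections (for example listen_tcp in libvirtd.conf, or the
+// libvirtd-tcp.socket unit on socket-activated setups; the exact configuration depends
+// on the libvirt version). Only domains reported as powered off are collected, per the
+// PowerOff check in execute().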
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.GetRemoteVmsAnswer; +import com.cloud.agent.api.GetRemoteVmsCommand; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.resource.LibvirtConnection; +import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.vm.UnmanagedInstanceTO; +import org.apache.log4j.Logger; +import org.libvirt.Connect; +import org.libvirt.Domain; +import org.libvirt.DomainBlockInfo; +import org.libvirt.DomainInfo; +import org.libvirt.LibvirtException; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +@ResourceWrapper(handles = GetRemoteVmsCommand.class) +public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper { + + private static final Logger s_logger = Logger.getLogger(LibvirtGetRemoteVmsCommandWrapper.class); + + @Override + public Answer execute(final GetRemoteVmsCommand command, final LibvirtComputingResource libvirtComputingResource) { + String result = null; + String hypervisorURI = "qemu+tcp://" + command.getRemoteIp() + + "/system"; + HashMap unmanagedInstances = new HashMap<>(); + try { + Connect conn = LibvirtConnection.getConnection(hypervisorURI); + final List allVmNames = libvirtComputingResource.getAllVmNames(conn); + for (String name : allVmNames) { + final Domain domain = libvirtComputingResource.getDomain(conn, name); + + final DomainInfo.DomainState ps = domain.getInfo().state; + + final VirtualMachine.PowerState state = libvirtComputingResource.convertToPowerState(ps); + + s_logger.debug("VM " + domain.getName() + ": powerstate = " + ps + "; vm state=" + state.toString()); + + if (state == VirtualMachine.PowerState.PowerOff) { + try { + UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn); + unmanagedInstances.put(instance.getName(), instance); + } catch (Exception e) { + s_logger.error("Error while fetching instance details", e); + } + } + domain.free(); + } + s_logger.debug("Found Vms: "+ unmanagedInstances.size()); + return new GetRemoteVmsAnswer(command, "", unmanagedInstances); + } catch (final LibvirtException e) { + s_logger.error("Error while listing stopped Vms on remote host: "+ e.getMessage()); + return new Answer(command, false, result); + } + } + + private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvirtComputingResource, Domain domain, Connect conn) { + try { + final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser(); + parser.parseDomainXML(domain.getXMLDesc(1)); + + final UnmanagedInstanceTO instance = new UnmanagedInstanceTO(); + instance.setName(domain.getName()); + if (parser.getCpuModeDef() != null) { + instance.setCpuCoresPerSocket(parser.getCpuModeDef().getCoresPerSocket()); + } + Long memory = domain.getMaxMemory(); + instance.setMemory(memory.intValue()/1024); + if (parser.getCpuTuneDef() !=null) { + instance.setCpuSpeed(parser.getCpuTuneDef().getShares()); + } + instance.setPowerState(getPowerState(libvirtComputingResource.getVmState(conn,domain.getName()))); + instance.setNics(getUnmanagedInstanceNics(parser.getInterfaces())); + 
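+ // Disks are resolved next; for file-backed disks the wrapper also tries to map each
+ // path back to its storage source host via the host's mount table (getSourceHostPath).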
+            instance.setDisks(getUnmanagedInstanceDisks(parser.getDisks(), libvirtComputingResource, domain));
+            instance.setVncPassword(parser.getVncPasswd() + "aaaaaaaaaaaaaa"); // Suffix back extra characters for DB compatibility
+
+            return instance;
+        } catch (Exception e) {
+            s_logger.debug("Unable to retrieve unmanaged instance info. ", e);
+            throw new CloudRuntimeException("Unable to retrieve unmanaged instance info. " + e.getMessage());
+        }
+    }
+
+    private UnmanagedInstanceTO.PowerState getPowerState(VirtualMachine.PowerState vmPowerState) {
+        switch (vmPowerState) {
+            case PowerOn:
+                return UnmanagedInstanceTO.PowerState.PowerOn;
+            case PowerOff:
+                return UnmanagedInstanceTO.PowerState.PowerOff;
+            default:
+                return UnmanagedInstanceTO.PowerState.PowerUnknown;
+
+        }
+    }
+
+    private List<UnmanagedInstanceTO.Nic> getUnmanagedInstanceNics(List<LibvirtVMDef.InterfaceDef> interfaces) {
+        final ArrayList<UnmanagedInstanceTO.Nic> nics = new ArrayList<>(interfaces.size());
+        int counter = 0;
+        for (LibvirtVMDef.InterfaceDef interfaceDef : interfaces) {
+            final UnmanagedInstanceTO.Nic nic = new UnmanagedInstanceTO.Nic();
+            nic.setNicId(String.valueOf(counter++));
+            nic.setMacAddress(interfaceDef.getMacAddress());
+            nic.setAdapterType(interfaceDef.getModel().toString());
+            nic.setNetwork(interfaceDef.getDevName());
+            nic.setPciSlot(interfaceDef.getSlot().toString());
+            nic.setVlan(interfaceDef.getVlanTag());
+            nics.add(nic);
+        }
+        return nics;
+    }
+
+    private List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<LibvirtVMDef.DiskDef> disksInfo,
+                                                                     LibvirtComputingResource libvirtComputingResource,
+                                                                     Domain dm) {
+        final ArrayList<UnmanagedInstanceTO.Disk> disks = new ArrayList<>(disksInfo.size());
+        int counter = 0;
+        for (LibvirtVMDef.DiskDef diskDef : disksInfo) {
+            if (diskDef.getDeviceType() != LibvirtVMDef.DiskDef.DeviceType.DISK) {
+                continue;
+            }
+
+            final UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk();
+
+            disk.setPosition(counter);
+
+            Long size;
+            try {
+                DomainBlockInfo blockInfo = dm.blockInfo(diskDef.getSourcePath());
+                size = blockInfo.getCapacity();
+            } catch (LibvirtException e) {
+                throw new RuntimeException(e);
+            }
+
+            disk.setCapacity(size);
+            disk.setDiskId(String.valueOf(counter++));
+            disk.setLabel(diskDef.getDiskLabel());
+            disk.setController(diskDef.getBusType().toString());
+
+            Pair<String, String> sourceHostPath = getSourceHostPath(libvirtComputingResource, diskDef.getSourcePath());
+            if (sourceHostPath != null) {
+                disk.setDatastoreHost(sourceHostPath.first());
+                disk.setDatastorePath(sourceHostPath.second());
+            } else {
+                disk.setDatastorePath(diskDef.getSourcePath());
+                disk.setDatastoreHost(diskDef.getSourceHost());
+            }
+
+            disk.setDatastoreType(diskDef.getDiskType().toString());
+            disk.setDatastorePort(diskDef.getSourceHostPort());
+            disks.add(disk);
+        }
+        return disks;
+    }
+
+    private Pair<String, String> getSourceHostPath(LibvirtComputingResource libvirtComputingResource, String diskPath) {
+        int pathEnd = diskPath.lastIndexOf("/");
+        if (pathEnd >= 0) {
+            diskPath = diskPath.substring(0, pathEnd);
+            return libvirtComputingResource.getSourceHostPath(diskPath);
+        }
+        return null;
+    }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java
new file mode 100644
index 00000000000..a2d84063d74
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java
@@ -0,0 +1,227 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import com.cloud.agent.api.GetUnmanagedInstancesAnswer;
+import com.cloud.agent.api.GetUnmanagedInstancesCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
+import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
+import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.apache.cloudstack.utils.qemu.QemuImgException;
+import org.apache.cloudstack.utils.qemu.QemuImgFile;
+import org.apache.cloudstack.vm.UnmanagedInstanceTO;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.log4j.Logger;
+import org.libvirt.Connect;
+import org.libvirt.Domain;
+import org.libvirt.LibvirtException;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+@ResourceWrapper(handles=GetUnmanagedInstancesCommand.class)
+public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWrapper<GetUnmanagedInstancesCommand, GetUnmanagedInstancesAnswer, LibvirtComputingResource> {
+    private static final Logger LOGGER = Logger.getLogger(LibvirtGetUnmanagedInstancesCommandWrapper.class);
+
+    @Override
+    public GetUnmanagedInstancesAnswer execute(GetUnmanagedInstancesCommand command, LibvirtComputingResource libvirtComputingResource) {
+        LOGGER.info("Fetching unmanaged instance on host");
+
+        HashMap<String, UnmanagedInstanceTO> unmanagedInstances = new HashMap<>();
+        try {
+            final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
+            final Connect conn = libvirtUtilitiesHelper.getConnection();
+            final List<Domain> domains = getDomains(command, libvirtComputingResource, conn);
+
+            for (Domain domain : domains) {
+                UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn);
+                if (instance != null) {
+                    unmanagedInstances.put(instance.getName(), instance);
+                    domain.free();
+                }
+            }
+        } catch (Exception e) {
+            String err = String.format("Error listing unmanaged instances: %s", e.getMessage());
+            LOGGER.error(err, e);
+            return new GetUnmanagedInstancesAnswer(command, err);
+        }
+
+        return new GetUnmanagedInstancesAnswer(command, "OK", unmanagedInstances);
+    }
+
+    private List<Domain> getDomains(GetUnmanagedInstancesCommand command,
+                                    LibvirtComputingResource libvirtComputingResource,
+                                    Connect conn) throws LibvirtException, CloudRuntimeException {
+        final List<Domain> domains = new ArrayList<>();
+        final String vmNameCmd = command.getInstanceName();
+        if (StringUtils.isNotBlank(vmNameCmd)) {
+            final Domain domain = libvirtComputingResource.getDomain(conn, vmNameCmd);
+            if (domain == null) {
+                String msg = String.format("VM %s not found", vmNameCmd);
+                LOGGER.error(msg);
+                throw new CloudRuntimeException(msg);
+            }
+
+            checkIfVmExists(vmNameCmd, domain);
+            checkIfVmIsManaged(command, vmNameCmd, domain);
+
+            domains.add(domain);
+        } else {
+            final List<String> allVmNames = libvirtComputingResource.getAllVmNames(conn);
+            for (String name : allVmNames) {
+                if (!command.hasManagedInstance(name)) {
+                    final Domain domain = libvirtComputingResource.getDomain(conn, name);
+                    domains.add(domain);
+                }
+            }
+        }
+        return domains;
+    }
+
+    private void checkIfVmExists(String vmNameCmd, final Domain domain) throws LibvirtException {
+        if (StringUtils.isNotEmpty(vmNameCmd) &&
+                !vmNameCmd.equals(domain.getName())) {
+            LOGGER.error("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd);
+            throw new CloudRuntimeException("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd);
+        }
+    }
+
+    private void checkIfVmIsManaged(GetUnmanagedInstancesCommand command, String vmNameCmd, final Domain domain) throws LibvirtException {
+        if (command.hasManagedInstance(domain.getName())) {
+            LOGGER.error("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd);
+            throw new CloudRuntimeException("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd);
+        }
+    }
+
+    private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvirtComputingResource, Domain domain, Connect conn) {
+        try {
+            final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
+            parser.parseDomainXML(domain.getXMLDesc(1));
+
+            final UnmanagedInstanceTO instance = new UnmanagedInstanceTO();
+            instance.setName(domain.getName());
+
+            instance.setCpuCores((int) LibvirtComputingResource.countDomainRunningVcpus(domain));
+            instance.setCpuSpeed(parser.getCpuTuneDef().getShares() / instance.getCpuCores());
+
+            if (parser.getCpuModeDef() != null) {
+                instance.setCpuCoresPerSocket(parser.getCpuModeDef().getCoresPerSocket());
+            }
+            instance.setPowerState(getPowerState(libvirtComputingResource.getVmState(conn, domain.getName())));
+            instance.setMemory((int) LibvirtComputingResource.getDomainMemory(domain) / 1024);
+            instance.setNics(getUnmanagedInstanceNics(parser.getInterfaces()));
+            instance.setDisks(getUnmanagedInstanceDisks(parser.getDisks(), libvirtComputingResource));
+            instance.setVncPassword(parser.getVncPasswd() + "aaaaaaaaaaaaaa"); // Suffix back extra characters for DB compatibility
+
+            return instance;
+        } catch (Exception e) {
+            LOGGER.info("Unable to retrieve unmanaged instance info. " + e.getMessage(), e);
" + e.getMessage(), e); + return null; + } + } + + private UnmanagedInstanceTO.PowerState getPowerState(VirtualMachine.PowerState vmPowerState) { + switch (vmPowerState) { + case PowerOn: + return UnmanagedInstanceTO.PowerState.PowerOn; + case PowerOff: + return UnmanagedInstanceTO.PowerState.PowerOff; + default: + return UnmanagedInstanceTO.PowerState.PowerUnknown; + + } + } + + private List getUnmanagedInstanceNics(List interfaces) { + final ArrayList nics = new ArrayList<>(interfaces.size()); + int counter = 0; + for (LibvirtVMDef.InterfaceDef interfaceDef : interfaces) { + final UnmanagedInstanceTO.Nic nic = new UnmanagedInstanceTO.Nic(); + nic.setNicId(String.valueOf(counter++)); + nic.setMacAddress(interfaceDef.getMacAddress()); + nic.setAdapterType(interfaceDef.getModel().toString()); + nic.setNetwork(interfaceDef.getDevName()); + nic.setPciSlot(interfaceDef.getSlot().toString()); + nic.setVlan(interfaceDef.getVlanTag()); + nics.add(nic); + } + return nics; + } + + private List getUnmanagedInstanceDisks(List disksInfo, LibvirtComputingResource libvirtComputingResource){ + final ArrayList disks = new ArrayList<>(disksInfo.size()); + int counter = 0; + for (LibvirtVMDef.DiskDef diskDef : disksInfo) { + if (diskDef.getDeviceType() != LibvirtVMDef.DiskDef.DeviceType.DISK) { + continue; + } + + final UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk(); + Long size = null; + String imagePath = null; + try { + QemuImgFile file = new QemuImgFile(diskDef.getSourcePath()); + QemuImg qemu = new QemuImg(0); + Map info = qemu.info(file); + size = Long.parseLong(info.getOrDefault("virtual_size", "0")); + imagePath = info.getOrDefault("image", null); + } catch (QemuImgException | LibvirtException e) { + throw new RuntimeException(e); + } + + disk.setPosition(counter); + disk.setCapacity(size); + disk.setDiskId(String.valueOf(counter++)); + disk.setLabel(diskDef.getDiskLabel()); + disk.setController(diskDef.getBusType().toString()); + + + Pair sourceHostPath = getSourceHostPath(libvirtComputingResource, diskDef.getSourcePath()); + if (sourceHostPath != null) { + disk.setDatastoreHost(sourceHostPath.first()); + disk.setDatastorePath(sourceHostPath.second()); + } else { + disk.setDatastorePath(diskDef.getSourcePath()); + disk.setDatastoreHost(diskDef.getSourceHost()); + } + + disk.setDatastoreType(diskDef.getDiskType().toString()); + disk.setDatastorePort(diskDef.getSourceHostPort()); + disk.setImagePath(imagePath); + disk.setDatastoreName(imagePath.substring(imagePath.lastIndexOf("/"))); + disks.add(disk); + } + return disks; + } + + private Pair getSourceHostPath(LibvirtComputingResource libvirtComputingResource, String diskPath) { + int pathEnd = diskPath.lastIndexOf("/"); + if (pathEnd >= 0) { + diskPath = diskPath.substring(0, pathEnd); + return libvirtComputingResource.getSourceHostPath(diskPath); + } + return null; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java new file mode 100644 index 00000000000..68373089038 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer;
+import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import org.apache.log4j.Logger;
+import org.libvirt.Connect;
+import org.libvirt.Domain;
+
+@ResourceWrapper(handles=PrepareUnmanageVMInstanceCommand.class)
+public final class LibvirtPrepareUnmanageVMInstanceCommandWrapper extends CommandWrapper<PrepareUnmanageVMInstanceCommand, PrepareUnmanageVMInstanceAnswer, LibvirtComputingResource> {
+    private static final Logger LOGGER = Logger.getLogger(LibvirtPrepareUnmanageVMInstanceCommandWrapper.class);
+    @Override
+    public PrepareUnmanageVMInstanceAnswer execute(PrepareUnmanageVMInstanceCommand command, LibvirtComputingResource libvirtComputingResource) {
+        final String vmName = command.getInstanceName();
+        final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
+        LOGGER.debug(String.format("Verify if KVM instance: [%s] is available before Unmanaging VM.", vmName));
+        try {
+            final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
+            final Domain domain = libvirtComputingResource.getDomain(conn, vmName);
+            if (domain == null) {
+                LOGGER.error("Prepare Unmanage VMInstanceCommand: vm not found " + vmName);
+                return new PrepareUnmanageVMInstanceAnswer(command, false, String.format("Cannot find VM with name [%s] in KVM host.", vmName));
+            }
+        } catch (Exception e) {
+            LOGGER.error("PrepareUnmanagedInstancesCommand failed due to " + e.getMessage());
+            return new PrepareUnmanageVMInstanceAnswer(command, false, "Error: " + e.getMessage());
+        }
+
+        return new PrepareUnmanageVMInstanceAnswer(command, true, "OK");
+    }
+}
diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java
index 6b0144045a2..c3807fdb8d7 100644
--- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java
+++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java
@@ -34,7 +34,10 @@ import java.util.stream.Stream;
 
 import javax.inject.Inject;
 
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
 import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.storage.dao.VMTemplatePoolDao;
 import com.cloud.host.Host;
 import com.cloud.host.dao.HostDao;
@@ -572,6 +575,9 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
     @Inject
     private PublicIpQuarantineDao publicIpQuarantineDao;
 
+    @Inject
+    private StoragePoolHostDao storagePoolHostDao;
+
     private SearchCriteria<ServiceOfferingJoinVO> getMinimumCpuServiceOfferingJoinSearchCriteria(int cpu) {
         SearchCriteria<ServiceOfferingJoinVO> sc = _srvOfferingJoinDao.createSearchCriteria();
         SearchCriteria<ServiceOfferingJoinVO> sc1 = _srvOfferingJoinDao.createSearchCriteria();
@@ -2816,23 +2822,24 @@
 
     @Override
     public ListResponse<StoragePoolResponse> searchForStoragePools(ListStoragePoolsCmd cmd) {
-        Pair<List<StoragePoolJoinVO>, Integer> result = cmd.getHostId() != null ? searchForLocalStorages(cmd) : searchForStoragePoolsInternal(cmd);
+        Pair<List<StoragePoolJoinVO>, Integer> result = (ScopeType.HOST.name().equalsIgnoreCase(cmd.getScope()) && cmd.getHostId() != null) ?
+                searchForLocalStorages(cmd) : searchForStoragePoolsInternal(cmd);
         return createStoragesPoolResponse(result);
     }
 
     private Pair<List<StoragePoolJoinVO>, Integer> searchForLocalStorages(ListStoragePoolsCmd cmd) {
         long id = cmd.getHostId();
-        String scope = ScopeType.HOST.toString();
-        Pair<List<StoragePoolJoinVO>, Integer> localStorages;
-
-        ListHostsCmd listHostsCmd = new ListHostsCmd();
-        listHostsCmd.setId(id);
-        Pair<List<HostJoinVO>, Integer> hosts = searchForServersInternal(listHostsCmd);
-
-        cmd.setScope(scope);
-        localStorages = searchForStoragePoolsInternal(cmd);
-
-        return localStorages;
+        List<StoragePoolHostVO> localstoragePools = storagePoolHostDao.listByHostId(id);
+        List<Long> poolIds = new ArrayList<>();
+        for (StoragePoolHostVO localstoragePool : localstoragePools) {
+            StoragePool storagePool = storagePoolDao.findById(localstoragePool.getPoolId());
+            if (storagePool != null && storagePool.isLocal()) {
+                poolIds.add(localstoragePool.getPoolId());
+            }
+        }
+        List<StoragePoolJoinVO> pools = _poolJoinDao.searchByIds(poolIds.toArray(new Long[0]));
+        return new Pair<>(pools, pools.size());
     }
 
     private ListResponse<StoragePoolResponse> createStoragesPoolResponse(Pair<List<StoragePoolJoinVO>, Integer> storagePools) {
diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
index 158fc38b979..121ca95e365 100644
--- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
@@ -129,6 +129,7 @@ import org.apache.cloudstack.userdata.UserDataManager;
 import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
 import org.apache.cloudstack.utils.security.ParserUtils;
 import org.apache.cloudstack.vm.schedule.VMScheduleManager;
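+// A sketch of the gate the import below is for (assumed shape of
+// UnmanagedVMsManager.isSupported, matching the hypervisors this patch series
+// targets; not verbatim from the interface):
+//     static boolean isSupported(HypervisorType type) {
+//         return type == HypervisorType.VMware || type == HypervisorType.KVM;
+//     }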
DataCenter zone, final Host host, final VirtualMachineTemplate template, final String instanceName, final String displayName, final Account owner, final String userData, final Account caller, final Boolean isDisplayVm, final String keyboard, final long accountId, final long userId, final ServiceOffering serviceOffering, final String sshPublicKeys, - final String hostName, final HypervisorType hypervisorType, final Map customParameters, final VirtualMachine.PowerState powerState) throws InsufficientCapacityException { + final String hostName, final HypervisorType hypervisorType, final Map customParameters, + final VirtualMachine.PowerState powerState, final LinkedHashMap> networkNicMap) throws InsufficientCapacityException { if (zone == null) { throw new InvalidParameterValueException("Unable to import virtual machine with invalid zone"); } @@ -8227,7 +8229,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir final Boolean dynamicScalingEnabled = checkIfDynamicScalingCanBeEnabled(null, serviceOffering, template, zone.getId()); return commitUserVm(true, zone, host, lastHost, template, hostName, displayName, owner, null, null, userData, null, null, isDisplayVm, keyboard, - accountId, userId, serviceOffering, template.getFormat().equals(ImageFormat.ISO), sshPublicKeys, null, + accountId, userId, serviceOffering, template.getFormat().equals(ImageFormat.ISO), sshPublicKeys, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, null, null, null, powerState, dynamicScalingEnabled, null, serviceOffering.getDiskOfferingId(), null); } @@ -8247,8 +8249,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return false; } - if (vm.getHypervisorType() != Hypervisor.HypervisorType.VMware) { - throw new UnsupportedServiceException("Unmanaging a VM is currently allowed for VMware VMs only"); + if (!UnmanagedVMsManager.isSupported(vm.getHypervisorType())) { + throw new UnsupportedServiceException("Unmanaging a VM is currently not supported on hypervisor " + + vm.getHypervisorType().toString()); } List volumes = _volsDao.findByInstance(vm.getId()); @@ -8427,4 +8430,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir public Boolean getDestroyRootVolumeOnVmDestruction(Long domainId){ return DestroyRootVolumeOnVmDestruction.valueIn(domainId); } + + private void setVncPasswordForKvmIfAvailable(Map customParameters, UserVmVO vm){ + if (customParameters.containsKey(VmDetailConstants.KVM_VNC_PASSWORD) + && StringUtils.isNotEmpty(customParameters.get(VmDetailConstants.KVM_VNC_PASSWORD))) { + vm.setVncPassword(customParameters.get(VmDetailConstants.KVM_VNC_PASSWORD)); + } + } } diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 92079868e75..b7190f4ff21 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -17,37 +17,129 @@ package org.apache.cloudstack.vm; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.stream.Collectors; - -import javax.inject.Inject; - +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckVolumeAnswer; +import 
com.cloud.agent.api.CheckVolumeCommand; import com.cloud.agent.api.ConvertInstanceAnswer; import com.cloud.agent.api.ConvertInstanceCommand; +import com.cloud.agent.api.CopyRemoteVolumeAnswer; +import com.cloud.agent.api.CopyRemoteVolumeCommand; +import com.cloud.agent.api.GetRemoteVmsAnswer; +import com.cloud.agent.api.GetRemoteVmsCommand; +import com.cloud.agent.api.GetUnmanagedInstancesAnswer; +import com.cloud.agent.api.GetUnmanagedInstancesCommand; +import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer; +import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.RemoteInstanceTO; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.configuration.Config; +import com.cloud.configuration.Resource; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.VmwareDatacenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VmwareDatacenterDao; +import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.deploy.DeploymentPlanningManager; +import com.cloud.event.ActionEvent; import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; import com.cloud.event.EventVO; +import com.cloud.event.UsageEventUtils; import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientVirtualNetworkCapacityException; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.UnsupportedServiceException; import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.HypervisorGuru; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.network.IpAddressManager; +import com.cloud.network.Network; +import com.cloud.network.NetworkModel; +import com.cloud.network.Networks; +import com.cloud.network.PhysicalNetwork; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.PhysicalNetworkDao; +import com.cloud.offering.DiskOffering; +import com.cloud.offering.NetworkOffering; +import com.cloud.offering.ServiceOffering; +import com.cloud.offerings.NetworkOfferingVO; +import com.cloud.offerings.dao.NetworkOfferingDao; +import com.cloud.org.Cluster; +import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; +import com.cloud.serializer.GsonHelper; +import com.cloud.server.ManagementService; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.GuestOS; +import com.cloud.storage.GuestOSHypervisor; import com.cloud.storage.ScopeType; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateVO; +import 
com.cloud.storage.Volume; +import com.cloud.storage.VolumeApiService; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.GuestOSHypervisorDao; +import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.user.Account; +import com.cloud.user.AccountService; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.UserVO; +import com.cloud.user.dao.UserDao; +import com.cloud.uservm.UserVm; +import com.cloud.utils.LogUtils; +import com.cloud.utils.Pair; +import com.cloud.utils.db.EntityManager; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.NetUtils; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.NicProfile; +import com.cloud.vm.NicVO; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineName; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VirtualMachineProfileImpl; +import com.cloud.vm.VmDetailConstants; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.UserVmDetailsDao; +import com.cloud.vm.dao.VMInstanceDao; +import com.google.gson.Gson; +import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -57,8 +149,11 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.admin.vm.ImportUnmanagedInstanceCmd; import org.apache.cloudstack.api.command.admin.vm.ImportVmCmd; import org.apache.cloudstack.api.command.admin.vm.ListUnmanagedInstancesCmd; +import org.apache.cloudstack.api.command.admin.vm.ListVmsForImportCmd; import org.apache.cloudstack.api.command.admin.vm.UnmanageVMInstanceCmd; import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.NicResponse; +import org.apache.cloudstack.api.response.UnmanagedInstanceDiskResponse; import org.apache.cloudstack.api.response.UnmanagedInstanceResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; @@ -78,95 +173,20 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.GetUnmanagedInstancesAnswer; -import com.cloud.agent.api.GetUnmanagedInstancesCommand; -import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer; -import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand; -import com.cloud.configuration.Config; -import com.cloud.configuration.Resource; -import com.cloud.dc.DataCenter; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.deploy.DataCenterDeployment; -import com.cloud.deploy.DeployDestination; -import com.cloud.deploy.DeploymentPlanner; -import com.cloud.deploy.DeploymentPlanningManager; -import com.cloud.event.ActionEvent; -import com.cloud.event.EventTypes; -import com.cloud.event.UsageEventUtils; -import 
com.cloud.exception.InsufficientAddressCapacityException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.InsufficientVirtualNetworkCapacityException; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceAllocationException; -import com.cloud.exception.UnsupportedServiceException; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.network.Network; -import com.cloud.network.NetworkModel; -import com.cloud.network.Networks; -import com.cloud.network.dao.NetworkDao; -import com.cloud.network.dao.NetworkVO; -import com.cloud.offering.DiskOffering; -import com.cloud.offering.ServiceOffering; -import com.cloud.org.Cluster; -import com.cloud.resource.ResourceManager; -import com.cloud.serializer.GsonHelper; -import com.cloud.server.ManagementService; -import com.cloud.service.ServiceOfferingVO; -import com.cloud.service.dao.ServiceOfferingDao; -import com.cloud.storage.DiskOfferingVO; -import com.cloud.storage.GuestOS; -import com.cloud.storage.GuestOSHypervisor; -import com.cloud.storage.Snapshot; -import com.cloud.storage.SnapshotVO; -import com.cloud.storage.StoragePool; -import com.cloud.storage.VMTemplateStoragePoolVO; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.Volume; -import com.cloud.storage.VolumeApiService; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.DiskOfferingDao; -import com.cloud.storage.dao.GuestOSDao; -import com.cloud.storage.dao.GuestOSHypervisorDao; -import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.storage.dao.VMTemplatePoolDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.template.VirtualMachineTemplate; -import com.cloud.user.Account; -import com.cloud.user.AccountService; -import com.cloud.user.ResourceLimitService; -import com.cloud.user.UserVO; -import com.cloud.user.dao.UserDao; -import com.cloud.uservm.UserVm; -import com.cloud.utils.LogUtils; -import com.cloud.utils.Pair; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.net.NetUtils; -import com.cloud.vm.DiskProfile; -import com.cloud.vm.NicProfile; -import com.cloud.vm.NicVO; -import com.cloud.vm.UserVmManager; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineManager; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.VirtualMachineProfileImpl; -import com.cloud.vm.VmDetailConstants; -import com.cloud.vm.dao.NicDao; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDao; -import com.google.gson.Gson; +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.stream.Collectors; public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { public static final String VM_IMPORT_DEFAULT_TEMPLATE_NAME = "system-default-vm-import-dummy-template.iso"; + public static final String KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME = "kvm-default-vm-import-dummy-template"; private static final Logger LOGGER = Logger.getLogger(UnmanagedVMsManagerImpl.class); private static final List importUnmanagedInstancesSupportedHypervisors = 
Arrays.asList(Hypervisor.HypervisorType.VMware, Hypervisor.HypervisorType.KVM); @@ -196,6 +216,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @Inject private ResourceLimitService resourceLimitService; @Inject + private UserVmDetailsDao userVmDetailsDao; + @Inject private UserVmManager userVmManager; @Inject private ResponseGenerator responseGenerator; @@ -234,14 +256,24 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @Inject private UserVmDao userVmDao; @Inject + private NetworkOfferingDao networkOfferingDao; + @Inject + EntityManager entityMgr; + @Inject + private NetworkOrchestrationService networkMgr; + @Inject + private PhysicalNetworkDao physicalNetworkDao; + @Inject + private IpAddressManager ipAddressManager; + @Inject + private StoragePoolHostDao storagePoolHostDao; + @Inject private HypervisorGuruManager hypervisorGuruManager; @Inject private VmwareDatacenterDao vmwareDatacenterDao; @Inject private ImageStoreDao imageStoreDao; @Inject - private StoragePoolHostDao storagePoolHostDao; - @Inject private DataStoreManager dataStoreManager; protected Gson gson; @@ -250,10 +282,11 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { gson = GsonHelper.getGsonLogger(); } - private VMTemplateVO createDefaultDummyVmImportTemplate() { + private VMTemplateVO createDefaultDummyVmImportTemplate(boolean isKVM) { + String templateName = (isKVM) ? KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME : VM_IMPORT_DEFAULT_TEMPLATE_NAME; VMTemplateVO template = null; try { - template = VMTemplateVO.createSystemIso(templateDao.getNextInSequence(Long.class, "id"), VM_IMPORT_DEFAULT_TEMPLATE_NAME, VM_IMPORT_DEFAULT_TEMPLATE_NAME, true, + template = VMTemplateVO.createSystemIso(templateDao.getNextInSequence(Long.class, "id"), templateName, templateName, true, "", true, 64, Account.ACCOUNT_ID_SYSTEM, "", "VM Import Default Template", false, 1); template.setState(VirtualMachineTemplate.State.Inactive); @@ -262,13 +295,75 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return null; } templateDao.remove(template.getId()); - template = templateDao.findByName(VM_IMPORT_DEFAULT_TEMPLATE_NAME); + template = templateDao.findByName(templateName); } catch (Exception e) { LOGGER.error("Unable to create default dummy template for VM import", e); } return template; } + private UnmanagedInstanceResponse createUnmanagedInstanceResponse(UnmanagedInstanceTO instance, Cluster cluster, Host host) { + UnmanagedInstanceResponse response = new UnmanagedInstanceResponse(); + response.setName(instance.getName()); + + if (cluster != null) { + response.setClusterId(cluster.getUuid()); + } + if (host != null) { + response.setHostId(host.getUuid()); + response.setHostName(host.getName()); + } + response.setPowerState(instance.getPowerState().toString()); + response.setCpuCores(instance.getCpuCores()); + response.setCpuSpeed(instance.getCpuSpeed()); + response.setCpuCoresPerSocket(instance.getCpuCoresPerSocket()); + response.setMemory(instance.getMemory()); + response.setOperatingSystemId(instance.getOperatingSystemId()); + response.setOperatingSystem(instance.getOperatingSystem()); + response.setObjectName("unmanagedinstance"); + + if (instance.getDisks() != null) { + for (UnmanagedInstanceTO.Disk disk : instance.getDisks()) { + UnmanagedInstanceDiskResponse diskResponse = new UnmanagedInstanceDiskResponse(); + diskResponse.setDiskId(disk.getDiskId()); + if (StringUtils.isNotEmpty(disk.getLabel())) { + diskResponse.setLabel(disk.getLabel()); + } + 
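+                // Capacity is a raw byte count: the KVM wrappers above populate it from
+                // DomainBlockInfo.getCapacity() or qemu-img's virtual_size (both bytes).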
diskResponse.setCapacity(disk.getCapacity()); + diskResponse.setController(disk.getController()); + diskResponse.setControllerUnit(disk.getControllerUnit()); + diskResponse.setPosition(disk.getPosition()); + diskResponse.setImagePath(disk.getImagePath()); + diskResponse.setDatastoreName(disk.getDatastoreName()); + diskResponse.setDatastoreHost(disk.getDatastoreHost()); + diskResponse.setDatastorePath(disk.getDatastorePath()); + diskResponse.setDatastoreType(disk.getDatastoreType()); + response.addDisk(diskResponse); + } + } + + if (instance.getNics() != null) { + for (UnmanagedInstanceTO.Nic nic : instance.getNics()) { + NicResponse nicResponse = new NicResponse(); + nicResponse.setId(nic.getNicId()); + nicResponse.setNetworkName(nic.getNetwork()); + nicResponse.setMacAddress(nic.getMacAddress()); + if (StringUtils.isNotEmpty(nic.getAdapterType())) { + nicResponse.setAdapterType(nic.getAdapterType()); + } + if (!CollectionUtils.isEmpty(nic.getIpAddress())) { + nicResponse.setIpAddresses(nic.getIpAddress()); + } + nicResponse.setVlanId(nic.getVlan()); + nicResponse.setIsolatedPvlanId(nic.getPvlan()); + nicResponse.setIsolatedPvlanType(nic.getPvlanType()); + response.addNic(nicResponse); + } + } + + return response; + } + private List getAdditionalNameFilters(Cluster cluster) { List additionalNameFilter = new ArrayList<>(); if (cluster == null) { @@ -358,7 +453,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return volumeApiService.doesTargetStorageSupportDiskOffering(pool, diskOffering.getTags()); } - private ServiceOfferingVO getUnmanagedInstanceServiceOffering(final UnmanagedInstanceTO instance, ServiceOfferingVO serviceOffering, final Account owner, final DataCenter zone, final Map details) + private ServiceOfferingVO getUnmanagedInstanceServiceOffering(final UnmanagedInstanceTO instance, ServiceOfferingVO serviceOffering, final Account owner, final DataCenter zone, final Map details, Hypervisor.HypervisorType hypervisorType) throws ServerApiException, PermissionDeniedException, ResourceAllocationException { if (instance == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Cannot find VM to import."); @@ -402,7 +497,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (!memory.equals(serviceOffering.getRamSize()) && !instance.getPowerState().equals(UnmanagedInstanceTO.PowerState.PowerOff)) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Service offering (%s) %dMB memory does not match VM memory %dMB and VM is not in powered off state (Power state: %s)", serviceOffering.getUuid(), serviceOffering.getRamSize(), memory, instance.getPowerState())); } - if (cpuSpeed != null && cpuSpeed > 0 && !cpuSpeed.equals(serviceOffering.getSpeed()) && !instance.getPowerState().equals(UnmanagedInstanceTO.PowerState.PowerOff)) { + if (hypervisorType == Hypervisor.HypervisorType.VMware && cpuSpeed != null && cpuSpeed > 0 && !cpuSpeed.equals(serviceOffering.getSpeed()) && !instance.getPowerState().equals(UnmanagedInstanceTO.PowerState.PowerOff)) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Service offering (%s) %dMHz CPU speed does not match VM CPU speed %dMHz and VM is not in powered off state (Power state: %s)", serviceOffering.getUuid(), serviceOffering.getSpeed(), cpuSpeed, instance.getPowerState())); } } @@ -468,16 +563,15 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return storagePool; } - private Pair> getRootAndDataDisks(List disks, final Map 
dataDiskOfferingMap) { + private Pair> getRootAndDataDisks( + List disks, + final Map dataDiskOfferingMap) { UnmanagedInstanceTO.Disk rootDisk = null; List dataDisks = new ArrayList<>(); - if (disks.size() == 1) { - rootDisk = disks.get(0); - return new Pair<>(rootDisk, dataDisks); - } + Set callerDiskIds = dataDiskOfferingMap.keySet(); if (callerDiskIds.size() != disks.size() - 1) { - String msg = String.format("VM has total %d disks for which %d disk offering mappings provided. %d disks need a disk offering for import", disks.size(), callerDiskIds.size(), disks.size()-1); + String msg = String.format("VM has total %d disks for which %d disk offering mappings provided. %d disks need a disk offering for import", disks.size(), callerDiskIds.size(), disks.size() - 1); LOGGER.error(String.format("%s. %s parameter can be used to provide disk offerings for the disks", msg, ApiConstants.DATADISK_OFFERING_LIST)); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, msg); } @@ -489,11 +583,16 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { rootDisk = disk; } else { dataDisks.add(disk); + DiskOffering diskOffering = diskOfferingDao.findById(dataDiskOfferingMap.getOrDefault(disk.getDiskId(), null)); + if ((disk.getCapacity() == null || disk.getCapacity() <= 0) && diskOffering != null) { + disk.setCapacity(diskOffering.getDiskSize()); + } } } - if (diskIdsWithoutOffering.size() > 1) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM has total %d disks, disk offering mapping not provided for %d disks. Disk IDs that may need a disk offering - %s", disks.size(), diskIdsWithoutOffering.size()-1, String.join(", ", diskIdsWithoutOffering))); + if (diskIdsWithoutOffering.size() > 1 || rootDisk == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM has total %d disks, disk offering mapping not provided for %d disks. Disk IDs that may need a disk offering - %s", disks.size(), diskIdsWithoutOffering.size() - 1, String.join(", ", diskIdsWithoutOffering))); } + return new Pair<>(rootDisk, dataDisks); } @@ -551,7 +650,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (!autoAssign && network.getGuestType().equals(Network.GuestType.Isolated)) { return; } + checksOnlyNeededForVmware(nic, network, hypervisorType); + } + private void checksOnlyNeededForVmware(UnmanagedInstanceTO.Nic nic, Network network, final Hypervisor.HypervisorType hypervisorType) { if (hypervisorType == Hypervisor.HypervisorType.VMware) { String networkBroadcastUri = network.getBroadcastUri() == null ? 
null : network.getBroadcastUri().toString(); if (nic.getVlan() != null && nic.getVlan() != 0 && nic.getPvlan() == null && @@ -600,7 +702,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } } - private Map getUnmanagedNicNetworkMap(String instanceName, List nics, final Map callerNicNetworkMap, final Map callerNicIpAddressMap, final DataCenter zone, final String hostName, final Account owner, Hypervisor.HypervisorType hypervisorType) throws ServerApiException { + private Map getUnmanagedNicNetworkMap(String instanceName, List nics, final Map callerNicNetworkMap, + final Map callerNicIpAddressMap, final DataCenter zone, final String hostName, + final Account owner, Hypervisor.HypervisorType hypervisorType) throws ServerApiException { Map nicNetworkMap = new HashMap<>(); String nicAdapter = null; for (int i = 0; i < nics.size(); i++) { @@ -656,6 +760,81 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return nicNetworkMap; } + private Pair importExternalDisk(UnmanagedInstanceTO.Disk disk, VirtualMachine vm, DeployDestination dest, DiskOffering diskOffering, + Volume.Type type, VirtualMachineTemplate template,Long deviceId, String remoteUrl, String username, String password, + String tmpPath, DiskProfile diskProfile) { + final String path = StringUtils.isEmpty(disk.getDatastorePath()) ? disk.getImagePath() : disk.getDatastorePath(); + String chainInfo = disk.getChainInfo(); + if (StringUtils.isEmpty(chainInfo)) { + VirtualMachineDiskInfo diskInfo = new VirtualMachineDiskInfo(); + diskInfo.setDiskDeviceBusName(String.format("%s%d:%d", disk.getController(), disk.getControllerUnit(), disk.getPosition())); + diskInfo.setDiskChain(new String[]{disk.getImagePath()}); + chainInfo = gson.toJson(diskInfo); + } + Map storage = dest.getStorageForDisks(); + Volume volume = volumeDao.findById(diskProfile.getVolumeId()); + StoragePool storagePool = storage.get(volume); + CopyRemoteVolumeCommand copyRemoteVolumeCommand = new CopyRemoteVolumeCommand(); + copyRemoteVolumeCommand.setRemoteIp(remoteUrl); + copyRemoteVolumeCommand.setUsername(username); + copyRemoteVolumeCommand.setPassword(password); + copyRemoteVolumeCommand.setSrcFile(path); + StorageFilerTO storageTO = new StorageFilerTO(storagePool); + copyRemoteVolumeCommand.setStorageFilerTO(storageTO); + if(tmpPath == null || tmpPath.length() < 1) { + tmpPath = "/tmp/"; + } else { + // Add / if path doesn't end with / + if(tmpPath.charAt(tmpPath.length() - 1) != '/') { + tmpPath += "/"; + } + } + copyRemoteVolumeCommand.setTempPath(tmpPath); + Answer answer = agentManager.easySend(dest.getHost().getId(), copyRemoteVolumeCommand); + if (!(answer instanceof CopyRemoteVolumeAnswer)) { + throw new CloudRuntimeException("Error while copying volume"); + } + CopyRemoteVolumeAnswer copyRemoteVolumeAnswer = (CopyRemoteVolumeAnswer) answer; + if(!copyRemoteVolumeAnswer.getResult()) { + throw new CloudRuntimeException("Error while copying volume"); + } + diskProfile.setSize(copyRemoteVolumeAnswer.getSize()); + DiskProfile profile = volumeManager.updateImportedVolume(type, diskOffering, vm, template, deviceId, + storagePool.getId(), copyRemoteVolumeAnswer.getFilename(), chainInfo, diskProfile); + + return new Pair<>(profile, storagePool); + } + + private Pair importKVMLocalDisk(VirtualMachine vm, DiskOffering diskOffering, + Volume.Type type, VirtualMachineTemplate template, + Long deviceId, Long hostId, String diskPath, DiskProfile diskProfile) { + List storagePools = 
primaryDataStoreDao.findLocalStoragePoolsByHostAndTags(hostId, null); + + if(storagePools.size() < 1) { + throw new CloudRuntimeException("Local Storage not found for host"); + } + + StoragePool storagePool = storagePools.get(0); + + DiskProfile profile = volumeManager.updateImportedVolume(type, diskOffering, vm, template, deviceId, + storagePool.getId(), diskPath, null, diskProfile); + + return new Pair<>(profile, storagePool); + } + + + private Pair importKVMSharedDisk(VirtualMachine vm, DiskOffering diskOffering, + Volume.Type type, VirtualMachineTemplate template, + Long deviceId, Long poolId, String diskPath, DiskProfile diskProfile) { + StoragePool storagePool = primaryDataStoreDao.findById(poolId); + + DiskProfile profile = volumeManager.updateImportedVolume(type, diskOffering, vm, template, deviceId, + poolId, diskPath, null, diskProfile); + + return new Pair<>(profile, storagePool); + } + + private Pair importDisk(UnmanagedInstanceTO.Disk disk, VirtualMachine vm, Cluster cluster, DiskOffering diskOffering, Volume.Type type, String name, Long diskSize, Long minIops, Long maxIops, VirtualMachineTemplate template, Account owner, Long deviceId) { @@ -779,7 +958,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { for (StoragePool pool : storagePools) { if (diskProfileStoragePool.second().getId() != pool.getId() && storagePoolSupportsDiskOffering(pool, dOffering) - ) { + ) { storagePool = pool; break; } @@ -791,7 +970,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { for (StoragePool pool : storagePools) { if (diskProfileStoragePool.second().getId() != pool.getId() && storagePoolSupportsDiskOffering(pool, dOffering) - ) { + ) { storagePool = pool; break; } @@ -888,10 +1067,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { LOGGER.debug(LogUtils.logGsonWithoutException("Trying to import VM [%s] with name [%s], in zone [%s], cluster [%s], and host [%s], using template [%s], service offering [%s], disks map [%s], NICs map [%s] and details [%s].", unmanagedInstance, instanceName, zone, cluster, host, template, serviceOffering, dataDiskOfferingMap, nicNetworkMap, details)); UserVm userVm = null; - ServiceOfferingVO validatedServiceOffering = null; try { - validatedServiceOffering = getUnmanagedInstanceServiceOffering(unmanagedInstance, serviceOffering, owner, zone, details); + validatedServiceOffering = getUnmanagedInstanceServiceOffering(unmanagedInstance, serviceOffering, owner, zone, details, cluster.getHypervisorType()); } catch (Exception e) { String errorMsg = String.format("Failed to import Unmanaged VM [%s] because the service offering [%s] is not compatible due to [%s].", unmanagedInstance.getName(), serviceOffering.getUuid(), StringUtils.defaultIfEmpty(e.getMessage(), "")); LOGGER.error(errorMsg, e); @@ -899,7 +1077,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } String internalCSName = unmanagedInstance.getInternalCSName(); - if(StringUtils.isEmpty(internalCSName)){ + if (StringUtils.isEmpty(internalCSName)) { internalCSName = instanceName; } Map allDetails = new HashMap<>(details); @@ -925,7 +1103,16 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (rootDisk == null || StringUtils.isEmpty(rootDisk.getController())) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed. 
Unable to retrieve root disk details for VM: %s ", instanceName)); } + if (cluster.getHypervisorType() == Hypervisor.HypervisorType.KVM) { + Long rootDiskOfferingId = validatedServiceOffering.getDiskOfferingId(); + DiskOffering rootDiskOffering = diskOfferingDao.findById(rootDiskOfferingId); + if ((rootDisk.getCapacity() == null || rootDisk.getCapacity() <= 0) && rootDiskOffering != null) { + rootDisk.setCapacity(rootDiskOffering.getDiskSize()); + } + } allDetails.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDisk.getController()); + allDetails.put(VmDetailConstants.ROOT_DISK_SIZE, String.valueOf(rootDisk.getCapacity() / Resource.ResourceType.bytesToGiB)); + try { checkUnmanagedDiskAndOfferingForImport(unmanagedInstance.getName(), rootDisk, null, validatedServiceOffering, owner, zone, cluster, migrateAllowed); if (CollectionUtils.isNotEmpty(dataDisks)) { // Data disk(s) present @@ -943,20 +1130,27 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (!CollectionUtils.isEmpty(unmanagedInstance.getNics())) { allDetails.put(VmDetailConstants.NIC_ADAPTER, unmanagedInstance.getNics().get(0).getAdapterType()); } + + if (StringUtils.isNotEmpty(unmanagedInstance.getVncPassword())) { + allDetails.put(VmDetailConstants.KVM_VNC_PASSWORD, unmanagedInstance.getVncPassword()); + } + VirtualMachine.PowerState powerState = VirtualMachine.PowerState.PowerOff; if (unmanagedInstance.getPowerState().equals(UnmanagedInstanceTO.PowerState.PowerOn)) { powerState = VirtualMachine.PowerState.PowerOn; } + try { userVm = userVmManager.importVM(zone, host, template, internalCSName, displayName, owner, null, caller, true, null, owner.getAccountId(), userId, validatedServiceOffering, null, hostName, - cluster.getHypervisorType(), allDetails, powerState); + cluster.getHypervisorType(), allDetails, powerState, null); } catch (InsufficientCapacityException ice) { String errorMsg = String.format("Failed to import VM [%s] due to [%s].", instanceName, ice.getMessage()); LOGGER.error(errorMsg, ice); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, errorMsg); } + if (userVm == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import vm name: %s", instanceName)); } @@ -998,7 +1192,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { for (UnmanagedInstanceTO.Nic nic : unmanagedInstance.getNics()) { Network network = networkDao.findById(allNicNetworkMap.get(nic.getNicId())); Network.IpAddresses ipAddresses = nicIpAddressMap.get(nic.getNicId()); - importNic(nic, userVm, network, ipAddresses, nicIndex, nicIndex==0, forced); + importNic(nic, userVm, network, ipAddresses, nicIndex, nicIndex == 0, forced); nicIndex++; } } catch (Exception e) { @@ -1023,7 +1217,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { command.setInstanceName(instanceName); command.setManagedInstancesNames(managedVms); Answer answer = agentManager.easySend(host.getId(), command); - if (!(answer instanceof GetUnmanagedInstancesAnswer)) { + if (!(answer instanceof GetUnmanagedInstancesAnswer) || !answer.getResult()) { return unmanagedInstances; } GetUnmanagedInstancesAnswer unmanagedInstancesAnswer = (GetUnmanagedInstancesAnswer) answer; @@ -1043,6 +1237,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (cluster == null) { throw new InvalidParameterValueException(String.format("Cluster with ID [%d] cannot be found.", clusterId)); } + if 
(!importUnmanagedInstancesSupportedHypervisors.contains(cluster.getHypervisorType())) { throw new InvalidParameterValueException(String.format("VM import is currently not supported for hypervisor [%s].", cluster.getHypervisorType().toString())); } @@ -1235,7 +1430,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (templateId == null) { template = templateDao.findByName(VM_IMPORT_DEFAULT_TEMPLATE_NAME); if (template == null) { - template = createDefaultDummyVmImportTemplate(); + template = createDefaultDummyVmImportTemplate(false); if (template == null) { throw new InvalidParameterValueException(String.format("Default VM import template with unique name: %s for hypervisor: %s cannot be created. Please use templateid parameter for import", VM_IMPORT_DEFAULT_TEMPLATE_NAME, hypervisorType.toString())); } @@ -1252,7 +1447,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @Override @ActionEvent(eventType = EventTypes.EVENT_VM_IMPORT, eventDescription = "importing VM", async = true) public UserVmResponse importVm(ImportVmCmd cmd) { - return baseImportInstance(cmd); + String source = cmd.getImportSource().toUpperCase(); + ImportSource importSource = Enum.valueOf(ImportSource.class, source); + if (ImportSource.VMWARE == importSource || ImportSource.UNMANAGED == importSource) { + return baseImportInstance(cmd); + } else { + return importKvmInstance(cmd); + } } private UserVm importUnmanagedInstanceFromVmwareToVmware(DataCenter zone, Cluster cluster, @@ -1277,12 +1478,18 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (unmanagedInstance == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve details for unmanaged VM: %s", name)); } + + if (template.getName().equals(VM_IMPORT_DEFAULT_TEMPLATE_NAME) && cluster.getHypervisorType().equals(Hypervisor.HypervisorType.KVM)) { + throw new InvalidParameterValueException("Template is needed and unable to use default template for hypervisor " + host.getHypervisorType().toString()); + } + if (template.getName().equals(VM_IMPORT_DEFAULT_TEMPLATE_NAME)) { String osName = unmanagedInstance.getOperatingSystem(); GuestOS guestOS = null; if (StringUtils.isNotEmpty(osName)) { guestOS = guestOSDao.findOneByDisplayName(osName); } + GuestOSHypervisor guestOSHypervisor = null; if (guestOS != null) { guestOSHypervisor = guestOSHypervisorDao.findByOsIdAndHypervisor(guestOS.getId(), host.getHypervisorType().toString(), host.getHypervisorVersion()); @@ -1296,6 +1503,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve guest OS details for unmanaged VM: %s with OS name: %s, OS ID: %s for hypervisor: %s version: %s. 
templateid parameter can be used to assign template for VM", name, osName, unmanagedInstance.getOperatingSystemId(), host.getHypervisorType().toString(), host.getHypervisorVersion())); } + template.setGuestOSId(guestOSHypervisor.getGuestOsId()); } userVm = importVirtualMachineInternal(unmanagedInstance, instanceName, zone, cluster, host, @@ -1333,7 +1541,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { String username = cmd.getUsername(); String password = cmd.getPassword(); String clusterName = cmd.getClusterName(); - String sourceHostName = cmd.getHost(); + String sourceHostName = cmd.getHostIp(); Long convertInstanceHostId = cmd.getConvertInstanceHostId(); Long convertStoragePoolId = cmd.getConvertStoragePoolId(); @@ -1633,6 +1841,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { cmdList.add(ListUnmanagedInstancesCmd.class); cmdList.add(ImportUnmanagedInstanceCmd.class); cmdList.add(UnmanageVMInstanceCmd.class); + cmdList.add(ListVmsForImportCmd.class); cmdList.add(ImportVmCmd.class); return cmdList; } @@ -1708,8 +1917,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new InvalidParameterValueException("Could not find VM to unmanage, it is either removed or not existing VM"); } else if (vmVO.getState() != VirtualMachine.State.Running && vmVO.getState() != VirtualMachine.State.Stopped) { throw new InvalidParameterValueException("VM with id = " + vmVO.getUuid() + " must be running or stopped to be unmanaged"); - } else if (vmVO.getHypervisorType() != Hypervisor.HypervisorType.VMware) { - throw new UnsupportedServiceException("Unmanage VM is currently allowed for VMware VMs only"); + } else if (!UnmanagedVMsManager.isSupported(vmVO.getHypervisorType())) { + throw new UnsupportedServiceException("Unmanage VM is currently not allowed for hypervisor " + + vmVO.getHypervisorType().toString()); } else if (vmVO.getType() != VirtualMachine.Type.User) { throw new UnsupportedServiceException("Unmanage VM is currently allowed for guest VMs only"); } @@ -1743,6 +1953,552 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return answer.getResult(); } + private UserVmResponse importKvmInstance(ImportVmCmd cmd) { + final Account caller = CallContext.current().getCallingAccount(); + if (caller.getType() != Account.Type.ADMIN) { + throw new PermissionDeniedException(String.format("Cannot perform this operation, Calling account is not root admin: %s", caller.getUuid())); + } + final Long zoneId = cmd.getZoneId(); + final DataCenterVO zone = dataCenterDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException("Please specify a valid zone."); + } + final String hypervisorType = cmd.getHypervisor(); + if (!Hypervisor.HypervisorType.KVM.toString().equalsIgnoreCase(hypervisorType)) { + throw new InvalidParameterValueException(String.format("VM import is currently not supported for hypervisor: %s", hypervisorType)); + } + + final String instanceName = cmd.getName(); + if (StringUtils.isEmpty(instanceName)) { + throw new InvalidParameterValueException(String.format("Instance name cannot be empty")); + } + if (cmd.getDomainId() != null && StringUtils.isEmpty(cmd.getAccountName())) { + throw new InvalidParameterValueException("domainid parameter must be specified with account parameter"); + } + final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); + long userId = CallContext.current().getCallingUserId(); + List userVOs = 
userDao.listByAccount(owner.getAccountId()); + if (CollectionUtils.isNotEmpty(userVOs)) { + userId = userVOs.get(0).getId(); + } + VMTemplateVO template = templateDao.findByName(KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME); + if (template == null) { + template = createDefaultDummyVmImportTemplate(true); + if (template == null) { + throw new InvalidParameterValueException("Error while creating the default VM import template"); + } + } + + final Long serviceOfferingId = cmd.getServiceOfferingId(); + if (serviceOfferingId == null) { + throw new InvalidParameterValueException("Service offering ID cannot be null"); + } + final ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(serviceOfferingId); + if (serviceOffering == null) { + throw new InvalidParameterValueException(String.format("Service offering ID: %d cannot be found", serviceOfferingId)); + } + accountService.checkAccess(owner, serviceOffering, zone); + try { + resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.user_vm, 1); + } catch (ResourceAllocationException e) { + LOGGER.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. %s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); + } + String displayName = cmd.getDisplayName(); + if (StringUtils.isEmpty(displayName)) { + displayName = instanceName; + } + String hostName = cmd.getHostName(); + if (StringUtils.isEmpty(hostName)) { + if (!NetUtils.verifyDomainNameLabel(instanceName, true)) { + throw new InvalidParameterValueException("Please provide a hostname for the VM; the VM name contains characters that cannot be used in a hostname"); + } + hostName = instanceName; + } + if (!NetUtils.verifyDomainNameLabel(hostName, true)) { + throw new InvalidParameterValueException("Invalid VM hostname. 
VM hostname can contain ASCII letters 'a' through 'z', the digits '0' through '9', " + + "and the hyphen ('-'), must be between 1 and 63 characters long, and can't start or end with \"-\" and can't start with digit"); + } + + final Map nicNetworkMap = cmd.getNicNetworkList(); + final Map nicIpAddressMap = cmd.getNicIpAddressList(); + final Map dataDiskOfferingMap = cmd.getDataDiskToDiskOfferingList(); + final Map details = cmd.getDetails(); + + String remoteUrl = cmd.getHost(); + String source = cmd.getImportSource().toUpperCase(); + String diskPath = cmd.getDiskPath(); + ImportSource importSource = Enum.valueOf(ImportSource.class, source); + Long hostId = cmd.getHostId(); + Long poolId = cmd.getStoragePoolId(); + Long networkId = cmd.getNetworkId(); + + UnmanagedInstanceTO unmanagedInstanceTO = null; + if (ImportSource.EXTERNAL == importSource) { + if (StringUtils.isBlank(cmd.getUsername())) { + throw new InvalidParameterValueException("Username need to be provided."); + } + + HashMap instancesMap = getRemoteVms(zoneId, remoteUrl, cmd.getUsername(), cmd.getPassword()); + unmanagedInstanceTO = instancesMap.get(cmd.getName()); + if (unmanagedInstanceTO == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Vm with name: %s not found on remote host", instanceName)); + } + } + + if (ImportSource.SHARED == importSource || ImportSource.LOCAL == importSource) { + if (diskPath == null) { + throw new InvalidParameterValueException("Disk Path is required for Import from shared/local storage"); + } + + if (networkId == null) { + throw new InvalidParameterValueException("Network is required for Import from shared/local storage"); + } + + if (poolId == null) { + throw new InvalidParameterValueException("Storage Pool is required for Import from shared/local storage"); + } + + StoragePool storagePool = primaryDataStoreDao.findById(poolId); + if (storagePool == null) { + throw new InvalidParameterValueException("Storage Pool not found"); + } + + if (volumeDao.findByPoolIdAndPath(poolId, diskPath) != null) { + throw new InvalidParameterValueException("Disk image is already in use"); + } + + DiskOffering diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId()); + + if (diskOffering != null && !storagePoolSupportsDiskOffering(storagePool, diskOffering)) { + throw new InvalidParameterValueException(String.format("Service offering: %s storage tags are not compatible with selected storage pool: %s", serviceOffering.getUuid(), storagePool.getUuid())); + } + } + + if (ImportSource.LOCAL == importSource) { + if (hostId == null) { + throw new InvalidParameterValueException("Host is required for Import from local storage"); + } + + if (hostDao.findById(hostId) == null) { + throw new InvalidParameterValueException("Host not found"); + } + + if(storagePoolHostDao.findByPoolHost(poolId, hostId) == null) { + throw new InvalidParameterValueException("Specified Local Storage Pool not found on Host"); + } + } + + UserVm userVm = null; + + if (ImportSource.EXTERNAL == importSource) { + String username = cmd.getUsername(); + String password = cmd.getPassword(); + String tmpPath = cmd.getTmpPath(); + userVm = importExternalKvmVirtualMachine(unmanagedInstanceTO, instanceName, zone, + template, displayName, hostName, caller, owner, userId, + serviceOffering, dataDiskOfferingMap, + nicNetworkMap, nicIpAddressMap, remoteUrl, username, password, tmpPath, details); + } else if (ImportSource.SHARED == importSource || ImportSource.LOCAL == importSource) { + try { + userVm = 
importKvmVirtualMachineFromDisk(importSource, instanceName, zone, + template, displayName, hostName, caller, owner, userId, + serviceOffering, dataDiskOfferingMap, networkId, hostId, poolId, diskPath, + details); + } catch (InsufficientCapacityException e) { + throw new RuntimeException(e); + } catch (ResourceAllocationException e) { + throw new RuntimeException(e); + } + } + if (userVm == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import Vm with name: %s ", instanceName)); + } + + CallContext.current().setEventResourceId(userVm.getId()); + CallContext.current().setEventResourceType(ApiCommandResourceType.VirtualMachine); + return responseGenerator.createUserVmResponse(ResponseObject.ResponseView.Full, "virtualmachine", userVm).get(0); + } + + private UserVm importExternalKvmVirtualMachine(final UnmanagedInstanceTO unmanagedInstance, final String instanceName, final DataCenter zone, + final VirtualMachineTemplate template, final String displayName, final String hostName, final Account caller, final Account owner, final Long userId, + final ServiceOfferingVO serviceOffering, final Map dataDiskOfferingMap, + final Map nicNetworkMap, final Map callerNicIpAddressMap, + final String remoteUrl, String username, String password, String tmpPath, final Map details) { + UserVm userVm = null; + + Map allDetails = new HashMap<>(details); + if (serviceOffering.isDynamic()) { + allDetails.put(VmDetailConstants.CPU_NUMBER, String.valueOf(serviceOffering.getCpu())); + allDetails.put(VmDetailConstants.MEMORY, String.valueOf(serviceOffering.getRamSize())); + allDetails.put(VmDetailConstants.CPU_SPEED, String.valueOf(serviceOffering.getSpeed())); + } + // Check disks and supplied disk offerings + List unmanagedInstanceDisks = unmanagedInstance.getDisks(); + + if (CollectionUtils.isEmpty(unmanagedInstanceDisks)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("No attached disks found for the unmanaged VM: %s", instanceName)); + } + + Pair> rootAndDataDisksPair = getRootAndDataDisks(unmanagedInstanceDisks, dataDiskOfferingMap); + final UnmanagedInstanceTO.Disk rootDisk = rootAndDataDisksPair.first(); + final List dataDisks = rootAndDataDisksPair.second(); + if (rootDisk == null || StringUtils.isEmpty(rootDisk.getController())) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed. 
Unable to retrieve root disk details for VM: %s ", instanceName)); + } + allDetails.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDisk.getController()); + + // Check NICs and supplied networks + Map nicIpAddressMap = getNicIpAddresses(unmanagedInstance.getNics(), callerNicIpAddressMap); + Map allNicNetworkMap = getUnmanagedNicNetworkMap(unmanagedInstance.getName(), unmanagedInstance.getNics(), nicNetworkMap, nicIpAddressMap, zone, hostName, owner, Hypervisor.HypervisorType.KVM); + if (!CollectionUtils.isEmpty(unmanagedInstance.getNics())) { + allDetails.put(VmDetailConstants.NIC_ADAPTER, unmanagedInstance.getNics().get(0).getAdapterType()); + } + VirtualMachine.PowerState powerState = VirtualMachine.PowerState.PowerOff; + + String internalName = getInternalName(owner.getAccountId()); + + try { + userVm = userVmManager.importVM(zone, null, template, internalName, displayName, owner, + null, caller, true, null, owner.getAccountId(), userId, + serviceOffering, null, hostName, + Hypervisor.HypervisorType.KVM, allDetails, powerState, null); + } catch (InsufficientCapacityException ice) { + LOGGER.error(String.format("Failed to import vm name: %s", instanceName), ice); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ice.getMessage()); + } + if (userVm == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import vm name: %s", instanceName)); + } + DiskOfferingVO diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId()); + String rootVolumeName = String.format("ROOT-%s", userVm.getId()); + DiskProfile diskProfile = volumeManager.allocateRawVolume(Volume.Type.ROOT, rootVolumeName, diskOffering, null, null, null, userVm, template, owner, null); + + DiskProfile[] dataDiskProfiles = new DiskProfile[dataDisks.size()]; + int diskSeq = 0; + for (UnmanagedInstanceTO.Disk disk : dataDisks) { + if (disk.getCapacity() == null || disk.getCapacity() == 0) { + throw new InvalidParameterValueException(String.format("Disk ID: %s size is invalid", disk.getDiskId())); + } + DiskOffering offering = diskOfferingDao.findById(dataDiskOfferingMap.get(disk.getDiskId())); + DiskProfile dataDiskProfile = volumeManager.allocateRawVolume(Volume.Type.DATADISK, String.format("DATA-%d-%s", userVm.getId(), disk.getDiskId()), offering, null, null, null, userVm, template, owner, null); + dataDiskProfiles[diskSeq++] = dataDiskProfile; + } + + final VirtualMachineProfile profile = new VirtualMachineProfileImpl(userVm, template, serviceOffering, owner, null); + DeploymentPlanner.ExcludeList excludeList = new DeploymentPlanner.ExcludeList(); + final DataCenterDeployment plan = new DataCenterDeployment(zone.getId(), null, null, null, null, null); + DeployDestination dest = null; + try { + dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); + } catch (Exception e) { + LOGGER.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); + cleanupFailedImportVM(userVm); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName())); + } + if(dest == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s. 
Suitable deployment destination not found", userVm.getInstanceName())); + } + + List> diskProfileStoragePoolList = new ArrayList<>(); + try { + if (rootDisk.getCapacity() == null || rootDisk.getCapacity() == 0) { + throw new InvalidParameterValueException(String.format("Root disk ID: %s size is invalid", rootDisk.getDiskId())); + } + + diskProfileStoragePoolList.add(importExternalDisk(rootDisk, userVm, dest, diskOffering, Volume.Type.ROOT, + template, null, remoteUrl, username, password, tmpPath, diskProfile)); + + long deviceId = 1L; + diskSeq = 0; + for (UnmanagedInstanceTO.Disk disk : dataDisks) { + DiskProfile dataDiskProfile = dataDiskProfiles[diskSeq++]; + DiskOffering offering = diskOfferingDao.findById(dataDiskOfferingMap.get(disk.getDiskId())); + + diskProfileStoragePoolList.add(importExternalDisk(disk, userVm, dest, offering, Volume.Type.DATADISK, + template, deviceId, remoteUrl, username, password, tmpPath, dataDiskProfile)); + deviceId++; + } + } catch (Exception e) { + LOGGER.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e); + cleanupFailedImportVM(userVm); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import volumes while importing vm: %s. %s", instanceName, StringUtils.defaultString(e.getMessage()))); + } + try { + int nicIndex = 0; + for (UnmanagedInstanceTO.Nic nic : unmanagedInstance.getNics()) { + Network network = networkDao.findById(allNicNetworkMap.get(nic.getNicId())); + Network.IpAddresses ipAddresses = nicIpAddressMap.get(nic.getNicId()); + importNic(nic, userVm, network, ipAddresses, nicIndex, nicIndex==0, true); + nicIndex++; + } + } catch (Exception e) { + LOGGER.error(String.format("Failed to import NICs while importing vm: %s", instanceName), e); + cleanupFailedImportVM(userVm); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import NICs while importing vm: %s. 
%s", instanceName, StringUtils.defaultString(e.getMessage()))); + } + publishVMUsageUpdateResourceCount(userVm, serviceOffering); + return userVm; + } + + private UserVm importKvmVirtualMachineFromDisk(final ImportSource importSource, final String instanceName, final DataCenter zone, + final VirtualMachineTemplate template, final String displayName, final String hostName, final Account caller, final Account owner, final Long userId, + final ServiceOfferingVO serviceOffering, final Map dataDiskOfferingMap, final Long networkId, + final Long hostId, final Long poolId, final String diskPath, final Map details) throws InsufficientCapacityException, ResourceAllocationException { + + UserVm userVm = null; + + Map allDetails = new HashMap<>(details); + if (serviceOffering.isDynamic()) { + allDetails.put(VmDetailConstants.CPU_NUMBER, String.valueOf(serviceOffering.getCpu())); + allDetails.put(VmDetailConstants.MEMORY, String.valueOf(serviceOffering.getRamSize())); + allDetails.put(VmDetailConstants.CPU_SPEED, String.valueOf(serviceOffering.getSpeed())); + } + + VirtualMachine.PowerState powerState = VirtualMachine.PowerState.PowerOff; + + NetworkVO network = networkDao.findById(networkId); + if (network == null) { + throw new InvalidParameterValueException("Unable to find network by id " + networkId); + } + + networkModel.checkNetworkPermissions(owner, network); + + // don't allow to use system networks + NetworkOffering networkOffering = entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); + if (networkOffering.isSystemOnly()) { + throw new InvalidParameterValueException("Network id=" + networkId + " is system only and can't be used for vm deployment"); + } + + LinkedHashMap> networkNicMap = new LinkedHashMap<>(); + + if ((network.getDataCenterId() != zone.getId())) { + if (!network.isStrechedL2Network()) { + throw new InvalidParameterValueException("Network id=" + network.getId() + + " doesn't belong to zone " + zone.getId()); + } + } + + String macAddress = networkModel.getNextAvailableMacAddressInNetwork(networkId); + String ipAddress = null; + if (network.getGuestType() != Network.GuestType.L2) { + ipAddress = ipAddressManager.acquireGuestIpAddress(network, null); + } + + Network.IpAddresses requestedIpPair = new Network.IpAddresses(ipAddress, null, macAddress); + + NicProfile nicProfile = new NicProfile(requestedIpPair.getIp4Address(), requestedIpPair.getIp6Address(), requestedIpPair.getMacAddress()); + nicProfile.setOrderIndex(0); + + boolean securityGroupEnabled = false; + if (networkModel.isSecurityGroupSupportedInNetwork(network)) { + securityGroupEnabled = true; + } + List profiles = networkNicMap.get(network.getUuid()); + if (CollectionUtils.isEmpty(profiles)) { + profiles = new ArrayList<>(); + } + profiles.add(nicProfile); + networkNicMap.put(network.getUuid(), profiles); + + String internalName = getInternalName(owner.getAccountId()); + + try { + userVm = userVmManager.importVM(zone, null, template, internalName, displayName, owner, + null, caller, true, null, owner.getAccountId(), userId, + serviceOffering, null, hostName, + Hypervisor.HypervisorType.KVM, allDetails, powerState, networkNicMap); + } catch (InsufficientCapacityException ice) { + LOGGER.error(String.format("Failed to import vm name: %s", instanceName), ice); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ice.getMessage()); + } + if (userVm == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import vm name: %s", instanceName)); 
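+                // Editorial summary comment (not part of the original patch): the disk-import path that follows
+                // 1) allocates a ROOT DiskProfile for the imported VM,
+                // 2) plans a deployment pinned to the caller-supplied hostId/poolId,
+                // 3) validates the source image on the target pool via CheckVolumeCommand (which also returns its size),
+                // 4) attaches it through importKVMSharedDisk()/importKVMLocalDisk() depending on the import source, and
+                // 5) imports the NIC and publishes usage/resource-count events.
+                // The internal name built above via getInternalName() follows the usual CloudStack naming pattern;
+                // e.g. VirtualMachineName.getVmName(10L, 2L, "DEFAULT") would yield "i-2-10-DEFAULT" (illustrative values).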
+ } + DiskOfferingVO diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId()); + String rootVolumeName = String.format("ROOT-%s", userVm.getId()); + DiskProfile diskProfile = volumeManager.allocateRawVolume(Volume.Type.ROOT, rootVolumeName, diskOffering, null, null, null, userVm, template, owner, null); + + final VirtualMachineProfile profile = new VirtualMachineProfileImpl(userVm, template, serviceOffering, owner, null); + DeploymentPlanner.ExcludeList excludeList = new DeploymentPlanner.ExcludeList(); + final DataCenterDeployment plan = new DataCenterDeployment(zone.getId(), null, null, hostId, poolId, null); + DeployDestination dest = null; + try { + dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); + } catch (Exception e) { + LOGGER.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); + cleanupFailedImportVM(userVm); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName())); + } + if(dest == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s. Suitable deployment destination not found", userVm.getInstanceName())); + } + + + Map storage = dest.getStorageForDisks(); + Volume volume = volumeDao.findById(diskProfile.getVolumeId()); + StoragePool storagePool = storage.get(volume); + CheckVolumeCommand checkVolumeCommand = new CheckVolumeCommand(); + checkVolumeCommand.setSrcFile(diskPath); + StorageFilerTO storageTO = new StorageFilerTO(storagePool); + checkVolumeCommand.setStorageFilerTO(storageTO); + Answer answer = agentManager.easySend(dest.getHost().getId(), checkVolumeCommand); + if (!(answer instanceof CheckVolumeAnswer)) { + cleanupFailedImportVM(userVm); + throw new CloudRuntimeException("Disk not found or is invalid"); + } + CheckVolumeAnswer checkVolumeAnswer = (CheckVolumeAnswer) answer; + if(!checkVolumeAnswer.getResult()) { + cleanupFailedImportVM(userVm); + throw new CloudRuntimeException("Disk not found or is invalid"); + } + diskProfile.setSize(checkVolumeAnswer.getSize()); + + + List> diskProfileStoragePoolList = new ArrayList<>(); + try { + long deviceId = 1L; + if(ImportSource.SHARED == importSource) { + diskProfileStoragePoolList.add(importKVMSharedDisk(userVm, diskOffering, Volume.Type.ROOT, + template, deviceId, poolId, diskPath, diskProfile)); + } else if(ImportSource.LOCAL == importSource) { + diskProfileStoragePoolList.add(importKVMLocalDisk(userVm, diskOffering, Volume.Type.ROOT, + template, deviceId, hostId, diskPath, diskProfile)); + } + } catch (Exception e) { + LOGGER.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e); + cleanupFailedImportVM(userVm); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import volumes while importing vm: %s. %s", instanceName, StringUtils.defaultString(e.getMessage()))); + } + networkOrchestrationService.importNic(macAddress,0,network, true, userVm, requestedIpPair, true); + publishVMUsageUpdateResourceCount(userVm, serviceOffering); + return userVm; + } + + + private NetworkVO getDefaultNetwork(DataCenter zone, Account owner, boolean selectAny) throws InsufficientCapacityException, ResourceAllocationException { + NetworkVO defaultNetwork = null; + + // if no network is passed in + // Check if default virtual network offering has + // Availability=Required. 
If it's true, search for corresponding + network + // * if network is found, use it. If more than 1 virtual network is + // found, throw an error + // * if network is not found, create a new one and use it + + List requiredOfferings = networkOfferingDao.listByAvailability(NetworkOffering.Availability.Required, false); + if (requiredOfferings.size() < 1) { + throw new InvalidParameterValueException("Unable to find network offering with availability=" + NetworkOffering.Availability.Required + + " to automatically create the network as a part of vm creation"); + } + + if (requiredOfferings.get(0).getState() == NetworkOffering.State.Enabled) { + // get Virtual networks + List virtualNetworks = networkModel.listNetworksForAccount(owner.getId(), zone.getId(), Network.GuestType.Isolated); + if (virtualNetworks == null) { + throw new InvalidParameterValueException("No (virtual) networks are found for account " + owner); + } + if (virtualNetworks.isEmpty()) { + defaultNetwork = createDefaultNetworkForAccount(zone, owner, requiredOfferings); + } else if (virtualNetworks.size() > 1 && !selectAny) { + throw new InvalidParameterValueException("More than one default isolated network found for account " + owner + "; please specify networkIds"); + } else { + defaultNetwork = networkDao.findById(virtualNetworks.get(0).getId()); + } + } else { + throw new InvalidParameterValueException("Required network offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); + } + + return defaultNetwork; + } + + private NetworkVO createDefaultNetworkForAccount(DataCenter zone, Account owner, List requiredOfferings) + throws InsufficientCapacityException, ResourceAllocationException { + NetworkVO defaultNetwork = null; + long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), requiredOfferings.get(0).getTags(), requiredOfferings.get(0).getTrafficType()); + // Validate physical network + PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId); + if (physicalNetwork == null) { + throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + + requiredOfferings.get(0).getTags()); + } + LOGGER.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + Network newNetwork = networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", + null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account, null, null, null, null, true, null, null, + null, null, null, null, null, null, null, null); + if (newNetwork != null) { + defaultNetwork = networkDao.findById(newNetwork.getId()); + } + return defaultNetwork; + } + + public ListResponse listVmsForImport(ListVmsForImportCmd cmd) { + final Account caller = CallContext.current().getCallingAccount(); + if (caller.getType() != Account.Type.ADMIN) { + throw new PermissionDeniedException(String.format("Cannot perform this operation, Calling account is not root admin: %s", caller.getUuid())); + } + final Long zoneId = cmd.getZoneId(); + final DataCenterVO zone = dataCenterDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException("Please specify a valid zone."); + } + final String hypervisorType = cmd.getHypervisor(); + if 
(Hypervisor.HypervisorType.KVM.toString().equalsIgnoreCase(hypervisorType)) { + if (StringUtils.isBlank(cmd.getUsername())) { + throw new InvalidParameterValueException("Username needs to be provided."); + } + } else { + throw new InvalidParameterValueException(String.format("VM import is currently not supported for hypervisor: %s", hypervisorType)); + } + + String keyword = cmd.getKeyword(); + if (StringUtils.isNotEmpty(keyword)) { + keyword = keyword.toLowerCase(); + } + + List responses = new ArrayList<>(); + HashMap vmMap = getRemoteVms(zoneId, cmd.getHost(), cmd.getUsername(), cmd.getPassword()); + for (String key : vmMap.keySet()) { + UnmanagedInstanceTO instance = vmMap.get(key); + if (StringUtils.isNotEmpty(keyword) && + !instance.getName().toLowerCase().contains(keyword)) { + continue; + } + responses.add(createUnmanagedInstanceResponse(instance, null, null)); + } + + ListResponse listResponses = new ListResponse<>(); + listResponses.setResponses(responses, responses.size()); + return listResponses; + } + + private HashMap getRemoteVms(long zoneId, String remoteUrl, String username, String password) { + // TODO: add an option to list a single VM by name + List hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); + if (hosts.size() < 1) { + throw new CloudRuntimeException("No hosts available for VM import"); + } + HostVO host = hosts.get(0); + GetRemoteVmsCommand getRemoteVmsCommand = new GetRemoteVmsCommand(remoteUrl, username, password); + Answer answer = agentManager.easySend(host.getId(), getRemoteVmsCommand); + if (!(answer instanceof GetRemoteVmsAnswer)) { + throw new CloudRuntimeException("Error while listing remote VMs"); + } + GetRemoteVmsAnswer getRemoteVmsAnswer = (GetRemoteVmsAnswer) answer; + return getRemoteVmsAnswer.getUnmanagedInstances(); + } + + private String getInternalName(long accountId) { + String instanceSuffix = configurationDao.getValue(Config.InstanceName.key()); + if (instanceSuffix == null) { + instanceSuffix = "DEFAULT"; + } + long vmId = userVmDao.getNextInSequence(Long.class, "id"); + return VirtualMachineName.getVmName(vmId, accountId, instanceSuffix); + } + @Override public String getConfigComponentName() { return UnmanagedVMsManagerImpl.class.getSimpleName(); @@ -1750,6 +2506,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] { UnmanageVMPreserveNic }; + return new ConfigKey[]{UnmanageVMPreserveNic}; } } diff --git a/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java index 1a66f1ea9cd..e7831998353 100644 --- a/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java @@ -19,8 +19,14 @@ package org.apache.cloudstack.vm; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckVolumeAnswer; +import com.cloud.agent.api.CheckVolumeCommand; import com.cloud.agent.api.ConvertInstanceAnswer; import com.cloud.agent.api.ConvertInstanceCommand; +import com.cloud.agent.api.CopyRemoteVolumeAnswer; +import com.cloud.agent.api.CopyRemoteVolumeCommand; +import com.cloud.agent.api.GetRemoteVmsAnswer; +import com.cloud.agent.api.GetRemoteVmsCommand; import com.cloud.agent.api.GetUnmanagedInstancesAnswer; import com.cloud.agent.api.GetUnmanagedInstancesCommand; import 
com.cloud.agent.api.to.DataStoreTO; @@ -32,9 +38,12 @@ import com.cloud.dc.VmwareDatacenterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VmwareDatacenterDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlanningManager; import com.cloud.event.ActionEventUtils; import com.cloud.event.UsageEventUtils; import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.PermissionDeniedException; @@ -50,6 +59,7 @@ import com.cloud.network.Network; import com.cloud.network.NetworkModel; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.offering.NetworkOffering; import com.cloud.offering.ServiceOffering; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; @@ -59,8 +69,11 @@ import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeApiService; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; @@ -79,7 +92,9 @@ import com.cloud.user.UserVO; import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; +import com.cloud.utils.db.EntityManager; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.DiskProfile; import com.cloud.vm.NicProfile; import com.cloud.vm.NicVO; import com.cloud.vm.UserVmManager; @@ -96,6 +111,8 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.admin.vm.ImportUnmanagedInstanceCmd; import org.apache.cloudstack.api.command.admin.vm.ImportVmCmd; import org.apache.cloudstack.api.command.admin.vm.ListUnmanagedInstancesCmd; +import org.apache.cloudstack.api.command.admin.vm.ListVmsForImportCmd; +import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -123,10 +140,12 @@ import org.mockito.junit.MockitoJUnitRunner; import java.net.URI; import java.util.ArrayList; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.UUID; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyMap; @@ -148,6 +167,10 @@ public class UnmanagedVMsManagerImplTest { @Mock private ClusterDao clusterDao; @Mock + private ClusterVO clusterVO; + @Mock + private UserVmVO userVm; + @Mock private ResourceManager resourceManager; @Mock private VMTemplatePoolDao templatePoolDao; @@ -212,6 +235,10 @@ public class UnmanagedVMsManagerImplTest { private VMInstanceVO virtualMachine; @Mock private NicVO nicVO; + @Mock + EntityManager entityMgr; + @Mock + DeploymentPlanningManager deploymentPlanningManager; private static final long virtualMachineId = 1L; @@ -275,6 +302,7 @@ public class 
UnmanagedVMsManagerImplTest { hosts.add(hostVO); when(hostVO.checkHostServiceOfferingTags(Mockito.any())).thenReturn(true); when(resourceManager.listHostsInClusterByStatus(Mockito.anyLong(), Mockito.any(Status.class))).thenReturn(hosts); + when(resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(any(Hypervisor.HypervisorType.class), Mockito.anyLong())).thenReturn(hosts); List templates = new ArrayList<>(); when(templatePoolDao.listAll()).thenReturn(templates); List volumes = new ArrayList<>(); @@ -284,6 +312,9 @@ public class UnmanagedVMsManagerImplTest { map.put(instance.getName(), instance); Answer answer = new GetUnmanagedInstancesAnswer(cmd, "", map); when(agentManager.easySend(Mockito.anyLong(), Mockito.any(GetUnmanagedInstancesCommand.class))).thenReturn(answer); + GetRemoteVmsCommand remoteVmListcmd = Mockito.mock(GetRemoteVmsCommand.class); + Answer remoteVmListAnswer = new GetRemoteVmsAnswer(remoteVmListcmd, "", map); + when(agentManager.easySend(Mockito.anyLong(), any(GetRemoteVmsCommand.class))).thenReturn(remoteVmListAnswer); DataCenterVO zone = Mockito.mock(DataCenterVO.class); when(zone.getId()).thenReturn(1L); when(dataCenterDao.findById(Mockito.anyLong())).thenReturn(zone); @@ -323,7 +354,7 @@ public class UnmanagedVMsManagerImplTest { when(userVmManager.importVM(nullable(DataCenter.class), nullable(Host.class), nullable(VirtualMachineTemplate.class), nullable(String.class), nullable(String.class), nullable(Account.class), nullable(String.class), nullable(Account.class), nullable(Boolean.class), nullable(String.class), nullable(Long.class), nullable(Long.class), nullable(ServiceOffering.class), nullable(String.class), - nullable(String.class), nullable(Hypervisor.HypervisorType.class), nullable(Map.class), nullable(VirtualMachine.PowerState.class))).thenReturn(userVm); + nullable(String.class), nullable(Hypervisor.HypervisorType.class), nullable(Map.class), nullable(VirtualMachine.PowerState.class), nullable(LinkedHashMap.class))).thenReturn(userVm); NetworkVO networkVO = Mockito.mock(NetworkVO.class); when(networkVO.getGuestType()).thenReturn(Network.GuestType.L2); when(networkVO.getBroadcastUri()).thenReturn(URI.create(String.format("vlan://%d", instanceNic.getVlan()))); @@ -426,19 +457,71 @@ public class UnmanagedVMsManagerImplTest { @Test(expected = UnsupportedServiceException.class) public void unmanageVMInstanceExistingVMSnapshotsTest() { + when(virtualMachine.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.None); unmanagedVMsManager.unmanageVMInstance(virtualMachineId); } @Test(expected = UnsupportedServiceException.class) public void unmanageVMInstanceExistingVolumeSnapshotsTest() { + when(virtualMachine.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.None); unmanagedVMsManager.unmanageVMInstance(virtualMachineId); } @Test(expected = UnsupportedServiceException.class) public void unmanageVMInstanceExistingISOAttachedTest() { + when(virtualMachine.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.None); unmanagedVMsManager.unmanageVMInstance(virtualMachineId); } + @Test + public void testListRemoteInstancesTest() { + ListVmsForImportCmd cmd = Mockito.mock(ListVmsForImportCmd.class); + when(cmd.getHypervisor()).thenReturn(Hypervisor.HypervisorType.KVM.toString()); + when(cmd.getUsername()).thenReturn("user"); + when(cmd.getPassword()).thenReturn("pass"); + ListResponse response = unmanagedVMsManager.listVmsForImport(cmd); + Assert.assertEquals(1, response.getCount().intValue()); + } + + @Test(expected = 
InvalidParameterValueException.class) + public void testListRemoteInstancesTestNonKVM() { + ListVmsForImportCmd cmd = Mockito.mock(ListVmsForImportCmd.class); + unmanagedVMsManager.listVmsForImport(cmd); + } + @Test + public void testImportFromExternalTest() throws InsufficientServerCapacityException { + String vmname = "TestInstance"; + ImportVmCmd cmd = Mockito.mock(ImportVmCmd.class); + when(cmd.getHypervisor()).thenReturn(Hypervisor.HypervisorType.KVM.toString()); + when(cmd.getName()).thenReturn(vmname); + when(cmd.getUsername()).thenReturn("user"); + when(cmd.getPassword()).thenReturn("pass"); + when(cmd.getImportSource()).thenReturn("external"); + when(cmd.getDomainId()).thenReturn(null); + VMTemplateVO template = Mockito.mock(VMTemplateVO.class); + when(templateDao.findByName(anyString())).thenReturn(template); + HostVO host = Mockito.mock(HostVO.class); + when(userVmDao.getNextInSequence(Long.class, "id")).thenReturn(1L); + DeployDestination mockDest = Mockito.mock(DeployDestination.class); + when(deploymentPlanningManager.planDeployment(any(), any(), any(), any())).thenReturn(mockDest); + DiskProfile diskProfile = Mockito.mock(DiskProfile.class); + when(volumeManager.allocateRawVolume(any(), any(), any(), any(), any(), any(), any(), any(), any(), any())) + .thenReturn(diskProfile); + Map storage = new HashMap<>(); + VolumeVO volume = Mockito.mock(VolumeVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + storage.put(volume, storagePool); + when(mockDest.getStorageForDisks()).thenReturn(storage); + when(mockDest.getHost()).thenReturn(host); + when(volumeDao.findById(anyLong())).thenReturn(volume); + CopyRemoteVolumeAnswer copyAnswer = Mockito.mock(CopyRemoteVolumeAnswer.class); + when(copyAnswer.getResult()).thenReturn(true); + when(agentManager.easySend(anyLong(), any(CopyRemoteVolumeCommand.class))).thenReturn(copyAnswer); + try (MockedStatic ignored = Mockito.mockStatic(UsageEventUtils.class)) { + unmanagedVMsManager.importVm(cmd); + } + } + private void baseBasicParametersCheckForImportInstance(String name, Long domainId, String accountName) { unmanagedVMsManager.basicParametersCheckForImportInstance(name, domainId, accountName); } @@ -518,7 +601,7 @@ public class UnmanagedVMsManagerImplTest { when(importVmCmd.getClusterId()).thenReturn(clusterId); when(importVmCmd.getDomainId()).thenReturn(null); when(importVmCmd.getImportSource()).thenReturn(VmImportService.ImportSource.VMWARE.toString()); - when(importVmCmd.getHost()).thenReturn(host); + when(importVmCmd.getHostIp()).thenReturn(host); when(importVmCmd.getNicNetworkList()).thenReturn(Map.of("NIC 1", networkId)); when(importVmCmd.getConvertInstanceHostId()).thenReturn(null); when(importVmCmd.getConvertStoragePoolId()).thenReturn(null); @@ -544,9 +627,6 @@ public class UnmanagedVMsManagerImplTest { if (selectConvertHost) { when(importVmCmd.getConvertInstanceHostId()).thenReturn(convertHostId); when(hostDao.findById(convertHostId)).thenReturn(convertHost); - } else { - when(hostDao.listByClusterAndHypervisorType(clusterId, Hypervisor.HypervisorType.KVM)) - .thenReturn(List.of(convertHost)); } DataStoreTO dataStoreTO = mock(DataStoreTO.class); @@ -613,6 +693,57 @@ public class UnmanagedVMsManagerImplTest { } @Test + public void testImportFromLocalDisk() throws InsufficientServerCapacityException { + testImportFromDisk("local"); + } + + @Test + public void testImportFromsharedStorage() throws InsufficientServerCapacityException { + testImportFromDisk("shared"); + } + + private void 
testImportFromDisk(String source) throws InsufficientServerCapacityException { + String vmname = "testVm"; + ImportVmCmd cmd = Mockito.mock(ImportVmCmd.class); + when(cmd.getHypervisor()).thenReturn(Hypervisor.HypervisorType.KVM.toString()); + when(cmd.getName()).thenReturn(vmname); + when(cmd.getImportSource()).thenReturn(source); + when(cmd.getDiskPath()).thenReturn("/var/lib/libvirt/images/test.qcow2"); + when(cmd.getDomainId()).thenReturn(null); + VMTemplateVO template = Mockito.mock(VMTemplateVO.class); + when(templateDao.findByName(anyString())).thenReturn(template); + HostVO host = Mockito.mock(HostVO.class); + when(hostDao.findById(anyLong())).thenReturn(host); + NetworkOffering netOffering = Mockito.mock(NetworkOffering.class); + when(entityMgr.findById(NetworkOffering.class, 0L)).thenReturn(netOffering); + when(userVmDao.getNextInSequence(Long.class, "id")).thenReturn(1L); + DeployDestination mockDest = Mockito.mock(DeployDestination.class); + when(deploymentPlanningManager.planDeployment(any(), any(), any(), any())).thenReturn(mockDest); + DiskProfile diskProfile = Mockito.mock(DiskProfile.class); + when(volumeManager.allocateRawVolume(any(), any(), any(), any(), any(), any(), any(), any(), any(), any())) + .thenReturn(diskProfile); + Map storage = new HashMap<>(); + VolumeVO volume = Mockito.mock(VolumeVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + storage.put(volume, storagePool); + when(mockDest.getStorageForDisks()).thenReturn(storage); + when(mockDest.getHost()).thenReturn(host); + when(volumeDao.findById(anyLong())).thenReturn(volume); + CheckVolumeAnswer answer = Mockito.mock(CheckVolumeAnswer.class); + when(answer.getResult()).thenReturn(true); + when(agentManager.easySend(anyLong(), any(CheckVolumeCommand.class))).thenReturn(answer); + List storagePools = new ArrayList<>(); + storagePools.add(storagePool); + when(primaryDataStoreDao.findLocalStoragePoolsByHostAndTags(anyLong(), any())).thenReturn(storagePools); + when(primaryDataStoreDao.findById(anyLong())).thenReturn(storagePool); + when(volumeApiService.doesTargetStorageSupportDiskOffering(any(StoragePool.class), any())).thenReturn(true); + StoragePoolHostVO storagePoolHost = Mockito.mock(StoragePoolHostVO.class); + when(storagePoolHostDao.findByPoolHost(anyLong(), anyLong())).thenReturn(storagePoolHost); + try (MockedStatic ignored = Mockito.mockStatic(UsageEventUtils.class)) { + unmanagedVMsManager.importVm(cmd); + } + } + public void testImportVmFromVmwareToKvmExistingVcenter() throws OperationTimedoutException, AgentUnavailableException { baseTestImportVmFromVmwareToKvm(VcenterParameter.EXISTING, false, false); } diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 5b164466d4c..b971d244941 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -269,7 +269,9 @@ known_categories = { 'createBucket': 'Object Store', 'updateBucket': 'Object Store', 'deleteBucket': 'Object Store', - 'listBuckets': 'Object Store' + 'listBuckets': 'Object Store', + 'listVmsForImport': 'Virtual Machine', + 'importVm': 'Virtual Machine' } diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 61fe69e7edb..7de4c709225 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -138,6 +138,7 @@ "label.action.image.store.read.only": "Make image store read-only", "label.action.image.store.read.write": "Make image store read-write", "label.action.import.export.instances": "Import-Export Instances", +"label.action.ingest.instances": "Ingest instances", 
"label.action.iso.permission": "Update ISO permissions", "label.action.iso.share": "Update ISO sharing", "label.action.lock.account": "Lock Account", @@ -673,7 +674,11 @@ "label.deployasis": "Read Instance settings from OVA", "label.deploymentplanner": "Deployment planner", "label.desc.db.stats": "Database Statistics", -"label.desc.importexportinstancewizard": "Import and export Instances to/from an existing VMware cluster.", +"label.desc.importexportinstancewizard": "Import and export Instances to/from an existing VMware or KVM cluster.", +"label.desc.import.ext.kvm.wizard": "Import libvirt domain from KVM Host", +"label.desc.import.local.kvm.wizard": "Import QCOW image from Local Storage", +"label.desc.import.shared.kvm.wizard": "Import QCOW image from Shared Storage", +"label.desc.ingesttinstancewizard": "Ingest instances from an external KVM host", "label.desc.importmigratefromvmwarewizard": "Import instances from VMware into a KVM cluster", "label.desc.usage.stats": "Usage Server Statistics", "label.description": "Description", @@ -683,10 +688,10 @@ "label.desthost": "Destination host", "label.destination": "Destination", "label.destination.cluster": "Destination Cluster", -"label.destination.hypervisor": "Destination Hypervisor", "label.destination.pod": "Destination Pod", "label.destination.zone": "Destination Zone", "label.destinationphysicalnetworkid": "Destination physical Network ID", +"label.destination.hypervisor": "Destination Hypervisor", "label.destinationtype": "Destination Type", "label.destipprefix": "Destination Network Address", "label.destipprefixlen": "Destination Prefix Length", @@ -717,6 +722,8 @@ "label.disconnected": "Last disconnected", "label.disk": "Disk", "label.disk.offerings": "Disk offerings", +"label.disk.path": "Disk Path", +"label.disk.tooltip": "Disk Image filename in the selected Storage Pool", "label.disk.selection": "Disk selection", "label.disk.size": "Disk size", "label.disk.usage.info": "Disk usage information", @@ -866,6 +873,7 @@ "label.expunged": "Expunged", "label.expunging": "Expunging", "label.export.rules": "Export Rules", +"label.ext.hostname.tooltip": "External Host Name or IP Address", "label.external.managed": "ExternalManaged", "label.external": "External", "label.external.link": "External link", @@ -876,6 +884,7 @@ "label.f5.ip.loadbalancer": "F5 BIG-IP load balancer.", "label.failed": "Failed", "label.featured": "Featured", +"label.fetch.instances": "Fetch Instances", "label.fetch.latest": "Fetch latest", "label.files": "Alternate files to retrieve", "label.filter": "Filter", @@ -977,6 +986,7 @@ "label.hostcontrolstate": "Control Plane Status", "label.hostid": "Host", "label.hostname": "Host", +"label.hostname.tooltip": "Destination Host. 
Volume should be located in local storage of this Host", "label.hostnamelabel": "Host name", "label.hosts": "Hosts", "label.hosttags": "Host tags", @@ -1013,6 +1023,7 @@ "label.info": "Info", "label.info.upper": "INFO", "label.infrastructure": "Infrastructure", +"label.ingest.instance": "Ingest Instance", "label.ingress": "Ingress", "label.ingress.rule": "Ingress Rule", "label.initial": "Inital", @@ -1503,6 +1514,7 @@ "label.password": "Password", "label.password.default": "Default Password", "label.password.reset.confirm": "Password has been reset to ", +"label.password.tooltip": "The password for the Host", "label.passwordenabled": "Password enabled", "label.path": "Path", "label.patp": "Palo Alto threat profile", @@ -1677,6 +1689,7 @@ "label.release.dedicated.pod": "Release dedicated pod", "label.release.dedicated.zone": "Release dedicated zone", "label.releasing.ip": "Releasing IP", +"label.remote.instances": "Remote Instances", "label.remove": "Remove", "label.remove.annotation": "Remove comment", "label.remove.egress.rule": "Remove egress rule", @@ -1802,6 +1815,7 @@ "label.scheduled.snapshots": "Scheduled Snapshots", "label.schedules": "Schedules", "label.scope": "Scope", +"label.scope.tooltip": "Primary Storage Pool Scope", "label.search": "Search", "label.secondary.isolated.vlan.type.isolated": "Isolated", "label.secondary.isolated.vlan.type.promiscuous": "Promiscuous", @@ -1831,6 +1845,7 @@ "label.select.project": "Select project", "label.select.projects": "Select projects", "label.select.ps": "Select primary storage", +"label.select.root.disk": "Select the ROOT disk", "label.select.source.vcenter.datacenter": "Select the source VMware vCenter Datacenter", "label.select.tier": "Select Network Tier", "label.select.zones": "Select zones", @@ -1986,6 +2001,7 @@ "label.storagemotionenabled": "Storage motion enabled", "label.storagepolicy": "Storage policy", "label.storagepool": "Storage pool", +"label.storagepool.tooltip": "Destination Storage Pool. Volume should be located in this Storage Pool", "label.storagetags": "Storage tags", "label.storagetype": "Storage type", "label.strict": "Strict", @@ -2075,6 +2091,8 @@ "label.timeout": "Timeout", "label.timeout.in.second ": " Timeout (seconds)", "label.timezone": "Timezone", +"label.tmppath": "Temp Path", +"label.tmppath.tooltip": "Temporary Path to store disk images on External Host before copying to destination storage pool. Default is /tmp", "label.to": "to", "label.token": "Token", "label.token.for.dashboard.login": "Token for dashboard login can be retrieved using following command", @@ -2180,6 +2198,7 @@ "label.userdata": "Userdata", "label.userdatal2": "User data", "label.username": "Username", +"label.username.tooltip": "The Username for the Host", "label.users": "Users", "label.usersource": "User type", "label.using.cli": "Using CLI", @@ -2678,6 +2697,10 @@ "message.desc.create.ssh.key.pair": "Please fill in the following data to create or register a ssh key pair.

(1) If public key is set, CloudStack will register the public key. You can use it through your private key.<br><br>(2) If public key is not set, CloudStack will create a new SSH key pair. In this case, please copy and save the private key. CloudStack will not keep it.", "message.desc.created.ssh.key.pair": "Created a SSH key pair.", "message.desc.host": "Each cluster must contain at least one host (computer) for guest Instances to run on. We will add the first host now. For a host to function in CloudStack, you must install hypervisor software on the host, assign an IP address to the host, and ensure the host is connected to the CloudStack management server.<br><br>Give the host's DNS or IP address, the user name (usually root) and password, and any labels you use to categorize hosts.", +"message.desc.importingestinstancewizard": "This feature only applies to libvirt-based KVM instances. Only Stopped instances can be ingested", +"message.desc.import.ext.kvm.wizard": "Import libvirt domain from External KVM Host not managed by CloudStack", +"message.desc.import.local.kvm.wizard": "Import QCOW image from Local Storage of selected KVM Host", +"message.desc.import.shared.kvm.wizard": "Import QCOW image from selected Primary Storage Pool", "message.desc.importexportinstancewizard": "By choosing to manage an Instance, CloudStack takes over the orchestration of that Instance. Unmanaging an Instance removes CloudStack's ability to manage it. In both cases, the Instance is left running and no changes are done to the VM on the hypervisor.<br><br>For KVM, managing a VM is an experimental feature.", "message.desc.importmigratefromvmwarewizard": "By selecting an existing or external VMware Datacenter and an instance to import, CloudStack migrates the selected instance from VMware to KVM on a conversion host using virt-v2v and imports it into a KVM cluster", "message.desc.primary.storage": "Each cluster must contain one or more primary storage servers. We will add the first one now. Primary storage contains the disk volumes for all the Instances running on hosts in the cluster. Use any standards-compliant protocol that is supported by the underlying hypervisor.", diff --git a/ui/src/config/section/compute.js index 16349668650..b56c8eeead9 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -424,7 +424,7 @@ export default { label: 'label.action.unmanage.virtualmachine', message: 'message.action.unmanage.virtualmachine', dataView: true, - show: (record) => { return ['Running', 'Stopped'].includes(record.state) && record.hypervisor === 'VMware' } + show: (record) => { return ['Running', 'Stopped'].includes(record.state) && ['VMware', 'KVM'].includes(record.hypervisor) } }, { api: 'expungeVirtualMachine', diff --git a/ui/src/views/compute/wizard/ComputeOfferingSelection.vue index 2887a415e57..4450ce1144c 100644 --- a/ui/src/views/compute/wizard/ComputeOfferingSelection.vue +++ b/ui/src/views/compute/wizard/ComputeOfferingSelection.vue @@ -173,7 +173,7 @@ export default { disabled = true } if (disabled === false && maxMemory && this.minimumMemory > 0 && - ((item.iscustomized === false && maxMemory < this.minimumMemory) || + ((item.iscustomized === false && ((maxMemory < this.minimumMemory) || this.exactMatch && maxMemory !== this.minimumMemory)) || (item.iscustomized === true && maxMemory < this.minimumMemory))) { disabled = true } diff --git a/ui/src/views/compute/wizard/MultiDiskSelection.vue index 5dd8466279f..8344508ad33 100644 --- a/ui/src/views/compute/wizard/MultiDiskSelection.vue +++ b/ui/src/views/compute/wizard/MultiDiskSelection.vue @@ -31,7 +31,7 @@ {{ record.displaytext || record.name }}
    [hunk markup lost in extraction: the removed and added lines both render {{ meta.key + ': ' + meta.value }}; the actual change was in the enclosing tag's attributes]
    @@ -104,6 +104,10 @@ export default { autoSelectLabel: { type: String, default: '' + }, + isKVMUnmanage: { + type: Boolean, + default: false } }, data () { diff --git a/ui/src/views/tools/ImportUnmanagedInstance.vue b/ui/src/views/tools/ImportUnmanagedInstance.vue index 86decf75d57..6433e68da57 100644 --- a/ui/src/views/tools/ImportUnmanagedInstance.vue +++ b/ui/src/views/tools/ImportUnmanagedInstance.vue @@ -19,7 +19,7 @@
    [template markup lost in extraction] @@ -120,7 +120,7 @@ [radio-group hunk; surviving fragments: :value="templateType" @change="changeTemplateType" and {{ $t('label.template.temporary.import') }}] @@ -235,7 +235,7 @@ [root-disk selector hunk; surviving fragments: @change="onSelectRootDisk" and option text {{ opt.label || opt.id }}]
    @@ -283,17 +299,40 @@ :zoneId="cluster.zoneid" :selectionEnabled="false" :filterUnimplementedNetworks="true" - filterMatchKey="broadcasturi" :hypervisor="this.cluster.hypervisortype" + :filterMatchKey="isKVMUnmanage ? undefined : 'broadcasturi'" @select-multi-network="updateMultiNetworkOffering" />
    [template markup lost in extraction: existing form items are removed and a network-selection block is added; the only surviving fragment renders {{ network.label }} for each option]
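    <!-- Hypothetical sketch, not from the patch (the real added markup was lost in extraction).
         Based on the script below (networkSelectOptions, values.networkid), the disk-import form
         plausibly gains an item along these lines, with element and field names assumed:
         <a-form-item name="networkid" ref="networkid" :label="$t('label.network')">
           <a-select v-model:value="form.networkid" :options="networkSelectOptions" />
         </a-form-item>
    -->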
    @@ -358,6 +397,26 @@ export default { type: Object, required: true }, + host: { + type: Object, + required: true + }, + pool: { + type: Object, + required: true + }, + resource: { + type: Object, + required: true + }, + isOpen: { + type: Boolean, + required: false + }, + zoneid: { + type: String, + required: false + }, importsource: { type: String, required: false @@ -366,12 +425,24 @@ export default { type: String, required: false }, - resource: { - type: Object, - required: true + exthost: { + type: String, + required: false }, - isOpen: { - type: Boolean, + username: { + type: String, + required: false + }, + password: { + type: String, + required: false + }, + tmppath: { + type: String, + required: false + }, + diskpath: { + type: String, required: false }, selectedVmwareVcenter: { @@ -384,12 +455,14 @@ export default { options: { domains: [], projects: [], + networks: [], templates: [] }, rowCount: {}, optionsLoading: { domains: false, projects: false, + networks: false, templates: false }, domains: [], @@ -397,7 +470,7 @@ export default { selectedDomainId: null, templates: [], templateLoading: false, - templateType: 'auto', + templateType: this.defaultTemplateType(), totalComputeOfferings: 0, computeOfferings: [], computeOfferingLoading: false, @@ -426,7 +499,15 @@ export default { storagePoolsForConversion: [], selectedStorageOptionForConversion: null, selectedStoragePoolForConversion: null, - showStoragePoolsForConversion: false + showStoragePoolsForConversion: false, + selectedRootDiskColumns: [ + { + key: 'name', + dataIndex: 'name', + title: this.$t('label.rootdisk') + } + ], + selectedRootDiskSources: [] } }, beforeCreate () { @@ -461,6 +542,15 @@ export default { showicon: true } }, + networks: { + list: 'listNetworks', + isLoad: true, + field: 'networkid', + options: { + zoneid: this.zoneid, + details: 'min' + } + }, templates: { list: 'listTemplates', isLoad: true, @@ -479,6 +569,21 @@ export default { } return false }, + isDiskImport () { + if (this.importsource === 'local' || this.importsource === 'shared') { + return true + } + return false + }, + isExternalImport () { + if (this.importsource === 'external') { + return true + } + return false + }, + isKVMUnmanage () { + return this.hypervisor && this.hypervisor === 'kvm' && (this.importsource === 'unmanaged' || this.importsource === 'external') + }, domainSelectOptions () { var domains = this.options.domains.map((domain) => { return { @@ -507,6 +612,19 @@ export default { }) return projects }, + networkSelectOptions () { + var networks = this.options.networks.map((network) => { + return { + label: network.name + ' (' + network.displaytext + ')', + value: network.id + } + }) + networks.unshift({ + label: '', + value: null + }) + return networks + }, templateSelectOptions () { return this.options.templates.map((template) => { return { @@ -540,6 +658,9 @@ export default { var nic = { ...nicEntry } nic.name = nic.name || nic.id nic.displaytext = nic.name + if (this.isExternalImport && nic.vlanid === -1) { + delete nic.vlanid + } if (nic.vlanid) { nic.broadcasturi = 'vlan://' + nic.vlanid if (nic.isolatedpvlan) { @@ -592,6 +713,9 @@ export default { page: 1 }) this.fetchKvmHostsForConversion() + if (this.resource.disk.length > 1) { + this.updateSelectedRootDisk() + } }, getMeta (obj, metaKeys) { var meta = [] @@ -724,6 +848,12 @@ export default { updateMultiNetworkOffering (data) { this.nicsNetworksMapping = data }, + defaultTemplateType () { + if (this.cluster.hypervisortype === 'VMWare') { + return 'auto' + } + return 
@@ -724,6 +848,12 @@ export default {
     updateMultiNetworkOffering (data) {
       this.nicsNetworksMapping = data
     },
+    defaultTemplateType () {
+      if (this.cluster.hypervisortype === 'VMWare') {
+        return 'auto'
+      }
+      return 'custom'
+    },
     changeTemplateType (e) {
       this.templateType = e.target.value
       if (this.templateType === 'auto') {
@@ -834,6 +964,17 @@ export default {
         }
       ]
     },
+    onSelectRootDisk (val) {
+      this.selectedRootDiskIndex = val
+      this.updateSelectedRootDisk()
+    },
+    updateSelectedRootDisk () {
+      var rootDisk = this.resource.disk[this.selectedRootDiskIndex]
+      rootDisk.size = rootDisk.capacity / (1024 * 1024 * 1024)
+      rootDisk.name = `${rootDisk.label} (${rootDisk.size} GB)`
+      rootDisk.meta = this.getMeta(rootDisk, { controller: 'controller', datastorename: 'datastore', position: 'position' })
+      this.selectedRootDiskSources = [rootDisk]
+    },
     handleSubmit (e) {
       e.preventDefault()
       if (this.loading) return
@@ -843,12 +984,32 @@ export default {
         name: this.resource.name,
         clusterid: this.cluster.id,
         displayname: values.displayname,
+        zoneid: this.zoneid,
         importsource: this.importsource,
-        hypervisor: this.hypervisor
+        hypervisor: this.hypervisor,
+        host: this.exthost,
+        hostname: values.hostname,
+        username: this.username,
+        password: this.password,
+        hostid: this.host.id,
+        storageid: this.pool.id,
+        diskpath: this.diskpath,
+        temppath: this.tmppath
       }
       var importapi = 'importUnmanagedInstance'
       if (this.isExternalImport || this.isDiskImport || this.selectedVmwareVcenter) {
         importapi = 'importVm'
+        if (this.isDiskImport) {
+          if (!values.networkid) {
+            this.$notification.error({
+              message: this.$t('message.request.failed'),
+              description: this.$t('message.please.enter.valid.value') + ': ' + this.$t('label.network')
+            })
+            return
+          }
+          params.name = values.displayname
+          params.networkid = values.networkid
+        }
       }
       if (!this.computeOffering || !this.computeOffering.id) {
         this.$notification.error({
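For illustration only (not part of the patch): updateSelectedRootDisk above derives a display size in GB from the disk's raw capacity, which the 1024^3 divisor suggests is reported in bytes; note the division is exact only for GiB-aligned capacities.

// Sketch of the size/name computation in updateSelectedRootDisk
// (illustrative; the sample disk object is invented).
const rootDisk = { label: 'scsi0:0', capacity: 21474836480 } // 20 GiB in bytes
rootDisk.size = rootDisk.capacity / (1024 * 1024 * 1024)
rootDisk.name = `${rootDisk.label} (${rootDisk.size} GB)`
console.log(rootDisk.name) // "scsi0:0 (20 GB)"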
@@ -892,6 +1053,16 @@ export default {
           })
         }
       }
+      if (this.isDiskImport) {
+        var storageType = this.computeOffering.storagetype
+        if (this.importsource !== storageType) {
+          this.$notification.error({
+            message: this.$t('message.request.failed'),
+            description: 'Incompatible Storage. Import Source is: ' + this.importsource + '. Storage Type in service offering is: ' + storageType
+          })
+          return
+        }
+      }
       if (this.selectedVmwareVcenter) {
         if (this.selectedVmwareVcenter.existingvcenterid) {
           params.existingvcenterid = this.selectedVmwareVcenter.existingvcenterid
@@ -934,6 +1105,7 @@ export default {
       }
       var nicNetworkIndex = 0
       var nicIpIndex = 0
+      var networkcheck = new Set()
       for (var nicId in this.nicsNetworksMapping) {
         if (!this.nicsNetworksMapping[nicId].network) {
           this.$notification.error({
@@ -944,6 +1116,16 @@ export default {
         }
         params['nicnetworklist[' + nicNetworkIndex + '].nic'] = nicId
         params['nicnetworklist[' + nicNetworkIndex + '].network'] = this.nicsNetworksMapping[nicId].network
+        var netId = this.nicsNetworksMapping[nicId].network
+        if (!networkcheck.has(netId)) {
+          networkcheck.add(netId)
+        } else {
+          this.$notification.error({
+            message: this.$t('message.request.failed'),
+            description: 'Same network cannot be assigned to multiple Nics'
+          })
+          return
+        }
         nicNetworkIndex++
         if ('ipAddress' in this.nicsNetworksMapping[nicId]) {
           if (!this.nicsNetworksMapping[nicId].ipAddress) {
@@ -1010,7 +1192,7 @@ export default {
       for (var field of fields) {
         this.updateFieldValue(field, undefined)
       }
-      this.templateType = 'auto'
+      this.templateType = this.defaultTemplateType()
       this.updateComputeOffering(undefined)
       this.switches = {}
     },
@@ -1022,33 +1204,33 @@
[Hunk body lost in extraction.]
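Illustrative only (not part of the patch): the networkcheck Set added above rejects a NIC-to-network mapping that reuses a network, enforcing the rule that each NIC lands on a distinct network. The guard in isolation:

// Sketch of the duplicate-network guard (illustrative; the mapping is invented).
const nicsNetworksMapping = {
  'nic-1': { network: 'net-a' },
  'nic-2': { network: 'net-a' } // duplicate assignment, rejected below
}
const networkcheck = new Set()
for (const nicId in nicsNetworksMapping) {
  const netId = nicsNetworksMapping[nicId].network
  if (networkcheck.has(netId)) {
    console.log('Same network cannot be assigned to multiple Nics:', netId)
    break
  }
  networkcheck.add(netId)
}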
diff --git a/ui/src/views/tools/ManageInstances.vue b/ui/src/views/tools/ManageInstances.vue
index 96eba539638..fc14f684e72 100644
--- a/ui/src/views/tools/ManageInstances.vue
+++ b/ui/src/views/tools/ManageInstances.vue
@@ -48,126 +48,278 @@
[Template hunk: markup lost in extraction. The surviving fragments show the VMware/KVM hypervisor radio group being reworked into a larger selection form with an import-source drop-down ({{ opt.label }}), cluster and zone destination selectors ({{ $t('label.clusterid') }}, {{ $t('label.zoneid') }}, {{ zoneitem.label }}), a fetch action ({{ $t('label.fetch.instances') }}), and an import action ({{ $t('label.import.instance') }}).]
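The original markup cannot be recovered, but as a purely speculative sketch (component names, models, and option sources are all assumptions), the surviving labels suggest a form along these lines:

<!-- Speculative sketch only; not the patch's actual markup. -->
<a-radio-group v-model:value="hypervisor">
  <a-radio-button value="vmware">VMware</a-radio-button>
  <a-radio-button value="kvm">KVM</a-radio-button>
</a-radio-group>
<a-select v-model:value="importsource">
  <a-select-option v-for="opt in importSourceOptions" :key="opt.value" :value="opt.value">
    {{ opt.label }}
  </a-select-option>
</a-select>
<!-- Destination switches between cluster (VMware) and zone (KVM). -->
<a-select v-if="hypervisor === 'vmware'" v-model:value="clusterid" :placeholder="$t('label.clusterid')">
  <a-select-option v-for="opt in clusterOptions" :key="opt.value" :value="opt.value">{{ opt.label }}</a-select-option>
</a-select>
<a-select v-else v-model:value="zoneid" :placeholder="$t('label.zoneid')">
  <a-select-option v-for="zoneitem in zoneOptions" :key="zoneitem.value" :value="zoneitem.value">{{ zoneitem.label }}</a-select-option>
</a-select>
<a-button type="primary" @click="fetchInstances">{{ $t('label.fetch.instances') }}</a-button>
<a-button type="primary" @click="importInstance">{{ $t('label.import.instance') }}</a-button>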
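To make the submit path concrete, here is a hypothetical shape of the importVm call that handleSubmit in the import form component above builds for a shared-storage disk import; parameter names mirror the params object in the patch, while the values and the service-offering parameter are invented for illustration:

// Hypothetical parameter set for a shared-storage disk import (values are
// invented; names mirror the params object built in handleSubmit above).
const importapi = 'importVm'
const params = {
  displayname: 'my-imported-vm',
  name: 'my-imported-vm', // disk imports overwrite name with displayname
  zoneid: '<zone-uuid>',
  importsource: 'shared',
  hypervisor: 'kvm',
  hostid: '<kvm-host-uuid>', // the selected host
  storageid: '<pool-uuid>',  // the selected storage pool
  diskpath: '<path/to/disk.qcow2>',
  networkid: '<network-uuid>' // mandatory for disk imports, see the check above
}
console.log(importapi, params)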