StorPool storage plugin (#6007)

* StorPool storage plugin

Adds volume storage plugin for StorPool SDS

* Added support for alternative endpoint

Added option to switch to alternative endpoint for SP primary storage

* renamed all classes from Storpool to StorPool

* Address review

* removed unnecessary else

* Removed check about the storage provider

We don't need this check; we can tell whether a snapshot is on StorPool by its name from the path

* Check that the current plugin supports all functionality before upgrading CS

* Smoke tests for StorPool plug-in

* Fixed conflicts

* Fixed conflicts and added missing Apache license header

* Removed whitespace in smoke tests

* Added StorPool plugin jar for Debian

The StorPool jar will be included in the cloudstack-agent package for
Debian/Ubuntu
slavkap 2022-04-14 17:12:01 +03:00 committed by GitHub
parent 9067938a0d
commit 4004dfcfd8
60 changed files with 11084 additions and 5 deletions

@@ -97,6 +97,11 @@
<artifactId>cloud-plugin-storage-volume-linstor</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-storpool</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-server</artifactId>
@@ -755,6 +760,12 @@
<artifactId>bcpkix-jdk15on</artifactId>
<overWrite>false</overWrite>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
</artifactItem>
<artifactItem>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-storpool</artifactId>
<overWrite>false</overWrite>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
</artifactItem>
<artifactItem>
<groupId>org.bouncycastle</groupId>
@@ -799,6 +810,7 @@
<exclude>org.bouncycastle:bcpkix-jdk15on</exclude>
<exclude>org.bouncycastle:bctls-jdk15on</exclude>
<exclude>mysql:mysql-connector-java</exclude>
<exclude>org.apache.cloudstack:cloud-plugin-storage-volume-storpool</exclude>
</excludes>
</artifactSet>
<transformers>

@@ -20,6 +20,7 @@
package org.apache.cloudstack.storage.command;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.to.DataTO;
public class CopyCmdAnswer extends Answer {
@@ -37,4 +38,8 @@ public class CopyCmdAnswer extends Answer {
public CopyCmdAnswer(String errMsg) {
super(null, false, errMsg);
}
public CopyCmdAnswer(Command cmd, Exception e) {
super(cmd, e);
}
}

debian/rules
@@ -41,6 +41,7 @@ override_dh_auto_install:
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -D plugins/storage/volume/storpool/target/cloud-plugin-storage-volume-storpool-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -d -m0755 debian/$(PACKAGE)-agent/lib/systemd/system
install -m0644 packaging/systemd/$(PACKAGE)-agent.service debian/$(PACKAGE)-agent/lib/systemd/system/$(PACKAGE)-agent.service

@@ -103,4 +103,33 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
* returns true if the host can access the storage pool
*/
boolean canHostAccessStoragePool(Host host, StoragePool pool);
/**
* Used by storage pools which want to keep VMs' information
* @return true if additional VM info is needed (intended for storage pools).
*/
boolean isVmInfoNeeded();
/**
* Provides additional info for a VM (intended for storage pools).
* E.g. the storage pool may want to keep/delete information if the volume is attached/detached to any VM.
* @param vmId The ID of the virtual machine
* @param volumeId the ID of the volume
*/
void provideVmInfo(long vmId, long volumeId);
/**
* Returns true if the storage have to know about the VM's tags (intended for storage pools).
* @param tagKey The name of the tag
* @return true if the storage have to know about the VM's tags
*/
boolean isVmTagsNeeded(String tagKey);
/**
* Provide VM's tags to storage (intended for storage pools).
* @param vmId The ID of the virtual machine
* @param volumeId The ID of the volume
* @param tagValue The value of the VM's tag
*/
void provideVmTags(long vmId, long volumeId, String tagValue);
}
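The seven driver implementations below stub these hooks out as no-ops. For a driver that does track VM information, as the StorPool driver does for its vc_policy volume tags, the intended usage might look like the following sketch (the class and its in-memory map are hypothetical; a real driver would implement the full PrimaryDataStoreDriver interface and persist its state):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class VmAwareDriverSketch {
    // volumeId -> vmId, maintained from attach/detach notifications
    private final Map<Long, Long> volumeToVm = new ConcurrentHashMap<>();

    public boolean isVmInfoNeeded() {
        return true; // ask CloudStack to call provideVmInfo on attach/detach
    }

    public void provideVmInfo(long vmId, long volumeId) {
        // Remember which VM the volume is attached to.
        volumeToVm.put(volumeId, vmId);
    }

    public boolean isVmTagsNeeded(String tagKey) {
        // React only to the tag key this backend cares about.
        return "vc_policy".equals(tagKey);
    }

    public void provideVmTags(long vmId, long volumeId, String tagValue) {
        // Propagate the VM's tag value to the backend volume.
        System.out.printf("tag volume %d (VM %d) with vc_policy=%s%n",
                volumeId, vmId, tagValue);
    }
}
```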

@@ -39,9 +39,9 @@ public interface StorageStrategyFactory {
/**
* Used only for KVM hypervisors when allocating a VM snapshot
* @param vmId the ID of the virtual machine
* @param rootPoolId volume pool ID
* @param snapshotMemory for VM snapshots with memory
* @return VMSnapshotStrategy
*/
VMSnapshotStrategy getVmSnapshotStrategy(Long vmId, Long rootPoolId, boolean snapshotMemory);
}

@@ -30,7 +30,7 @@ public interface VMSnapshotStrategy {
StrategyPriority canHandle(VMSnapshot vmSnapshot);
/**
* Used only for KVM hypervisors when allocating a VM snapshot
* Verifies if the strategy can handle the VM snapshot. This method is used only for KVM hypervisors when allocating a VM snapshot.
* @param vmId the ID of the virtual machine
* @param snapshotMemory for VM snapshots with memory
* @return StrategyPriority

@@ -52,8 +52,8 @@ import com.cloud.host.dao.HostDao;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.GuestOSHypervisorVO;
import com.cloud.storage.GuestOSVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.GuestOSHypervisorDao;

@@ -351,6 +351,7 @@ install -D agent/target/transformed/cloudstack-agent-profile.sh ${RPM_BUILD_ROOT
install -D agent/target/transformed/cloudstack-agent.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-agent
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar
cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
cp plugins/storage/volume/storpool/target/*.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
# Usage server
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage

@@ -344,6 +344,7 @@ install -D agent/target/transformed/cloudstack-agent-profile.sh ${RPM_BUILD_ROOT
install -D agent/target/transformed/cloudstack-agent.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-agent
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar
cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
cp plugins/storage/volume/storpool/target/*.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
# Usage server
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage

@@ -346,6 +346,7 @@ install -D agent/target/transformed/cloudstack-agent-profile.sh ${RPM_BUILD_ROOT
install -D agent/target/transformed/cloudstack-agent.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-agent
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar
cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
cp plugins/storage/volume/storpool/target/*.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
# Usage server
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage

@@ -123,6 +123,8 @@
<module>storage/volume/solidfire</module>
<module>storage/volume/scaleio</module>
<module>storage/volume/linstor</module>
<module>storage/volume/storpool</module>
<module>storage-allocators/random</module>
@@ -211,4 +213,4 @@
</modules>
</profile>
</profiles>
</project>

@@ -1860,4 +1860,22 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
@Override
public boolean isVmInfoNeeded() {
return false;
}
@Override
public void provideVmInfo(long vmId, long volumeId) {
}
@Override
public boolean isVmTagsNeeded(String tagKey) {
return false;
}
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
}
}

@@ -491,4 +491,22 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
@Override
public boolean isVmInfoNeeded() {
return false;
}
@Override
public void provideVmInfo(long vmId, long volumeId) {
}
@Override
public boolean isVmTagsNeeded(String tagKey) {
return false;
}
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
}
}

@@ -765,4 +765,22 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
@Override
public boolean isVmInfoNeeded() {
return false;
}
@Override
public void provideVmInfo(long vmId, long volumeId) {
}
@Override
public boolean isVmTagsNeeded(String tagKey) {
return false;
}
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
}
}

@@ -239,4 +239,22 @@ public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
@Override
public boolean isVmInfoNeeded() {
return false;
}
@Override
public void provideVmInfo(long vmId, long volumeId) {
}
@Override
public boolean isVmTagsNeeded(String tagKey) {
return false;
}
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
}
}

@@ -265,4 +265,22 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
@Override
public boolean isVmInfoNeeded() {
return false;
}
@Override
public void provideVmInfo(long vmId, long volumeId) {
}
@Override
public boolean isVmTagsNeeded(String tagKey) {
return false;
}
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
}
}

@@ -947,4 +947,22 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
String msg = "SDC not connected on the host: " + host.getId() + ", reconnect the SDC to MDM";
alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg);
}
@Override
public boolean isVmInfoNeeded() {
return false;
}
@Override
public void provideVmInfo(long vmId, long volumeId) {
}
@Override
public boolean isVmTagsNeeded(String tagKey) {
return false;
}
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
}
}

@@ -1619,4 +1619,22 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
@Override
public boolean isVmInfoNeeded() {
return false;
}
@Override
public void provideVmInfo(long vmId, long volumeId) {
}
@Override
public boolean isVmTagsNeeded(String tagKey) {
return false;
}
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
}
}

@@ -0,0 +1,344 @@
# StorPool CloudStack Integration
## CloudStack Overview
### Primary and Secondary storage
Primary storage is associated with a cluster or zone, and it stores the virtual disks for all the VMs running on hosts in that cluster/zone.
Secondary storage stores the following:
* Templates — OS images that can be used to boot VMs and can include additional configuration information, such as installed applications
* ISO images — disc images containing data or bootable media for operating systems
* Disk volume snapshots — saved copies of VM data which can be used for data recovery or to create new templates
### ROOT and DATA volumes
ROOT volumes correspond to the boot disk of a VM. They are created automatically by CloudStack during VM creation.
ROOT volumes are created from a system disk offering corresponding to the service offering the user VM
is based on. The ROOT volume's disk offering may be changed, but only to another system-created disk offering.
DATA volumes correspond to additional disks. These can be created by users and then attached/detached to VMs.
DATA volumes are created based on a user-defined disk offering.
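For illustration, creating and attaching a DATA volume through CloudMonkey might look like the following sketch (the IDs are placeholders; `create volume` and `attach volume` are the standard CloudStack API calls):

```bash
# Create a DATA volume from a user-defined disk offering...
create volume name=data01 zoneid=<zone-uuid> diskofferingid=<disk-offering-uuid>
# ...and attach it to a VM.
attach volume id=<volume-uuid> virtualmachineid=<vm-uuid>
```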
## Plugin Organization
The StorPool plugin consists of two parts:
### KVM hypervisor plugin patch
Source directory: ./apache-cloudstack-4.17-src/plugins/hypervisors/kvm
### StorPool primary storage plugin
Source directory: ./apache-cloudstack-4.17.0-src/plugins/storage/volume
A single plugin serves both the CloudStack management server and the agents, in the hope that keeping all the source
in one place will ease development and maintenance. The plugin itself, though, is separated into two largely
independent parts:
* ./src/com/... directory tree: agent-related classes and commands sent from management to agent
* ./src/org/... directory tree: management-related classes
The plugin is intended to be self-contained and non-intrusive, so ideally deploying it consists of nothing more than
dropping the jar file into the appropriate places. This is why all StorPool-related communication
(e.g. data copying, volume resize) is done with StorPool-specific commands even when there is a CloudStack command
that does much the same thing.
Note that for the present the StorPool plugin may only be used for a single primary storage cluster; support for
multiple clusters is planned.
## Build, Install, Setup
### Build
Go to the source directory and run:
```bash
mvn -Pdeveloper -DskipTests install
```
The resulting jar file is located in the target/ subdirectory.
Note: a code style (Checkstyle) check runs before compilation; if it fails, compilation is aborted.
In short: no trailing whitespace, indent with 4 spaces (not tabs), and comment out or remove unused imports.
Note: both the KVM plugin and the StorPool plugin proper need to be built.
### Install
#### StorPool primary storage plugin
For each CloudStack management host:
```bash
scp ./target/cloud-plugin-storage-volume-storpool-{version}.jar {MGMT_HOST}:/usr/share/cloudstack-management/lib/
```
For each CloudStack agent host:
```bash
scp ./target/cloud-plugin-storage-volume-storpool-{version}.jar {AGENT_HOST}:/usr/share/cloudstack-agent/plugins/
```
Note: the CloudStack management and agent services must be restarted after adding the plugin to the respective directories.
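Assuming systemd-managed services with the standard package names (as used by the Debian and RPM packaging above), a minimal restart sequence would be:

```bash
# On each management host, after copying the jar:
systemctl restart cloudstack-management

# On each agent host:
systemctl restart cloudstack-agent
```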
Note: agents should have access to the StorPool management API, since attach and detach operations happen on the agent.
This is a CloudStack design constraint; little can be done about it.
### Setup
#### Setting up StorPool
Perform the StorPool installation following the StorPool Installation Guide.
Create a template to be used by CloudStack. The template must define *placeHead*, *placeAll*, *placeTail* and *replication*.
There is no need to set a default volume size, because it is determined by the CloudStack disk and service offerings.
#### Setting up a StorPool PRIMARY storage pool in CloudStack
From the Web UI, go to Infrastructure -> Primary Storage -> Add Primary Storage
* Scope: select Zone-Wide
* Hypervisor: select KVM
* Zone: pick the appropriate zone
* Name: a user-specified name
* Protocol: select *SharedMountPoint*
* Path: enter */dev/storpool* (a required argument, though not actually needed in practice)
* Provider: select *StorPool*
* Managed: leave unchecked (currently ignored)
* Capacity Bytes: used for accounting purposes only; may be more or less than the actual StorPool template capacity
* Capacity IOPS: currently not used (may later be used for max IOPS limits on volumes from this pool)
* URL: enter SP_API_HTTP=address:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=template_name. At present one template can be used for at most one storage pool.
  * SP_API_HTTP - address of the StorPool API
  * SP_AUTH_TOKEN - StorPool's authentication token
  * SP_TEMPLATE - name of the StorPool template
* Storage Tags: if left blank, the StorPool storage plugin will use the pool name to create a corresponding storage tag; this storage tag may be used later, when defining service or disk offerings
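For example, a complete URL value might look like this (the address, token, and template name are placeholders for illustration only):

```
SP_API_HTTP=10.1.1.10:81;SP_AUTH_TOKEN=1234567890123456789;SP_TEMPLATE=cloudstack
```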
## Plugin Functionality
<table cellpadding="5">
<tr>
<th>Plugin Action</th>
<th>CloudStack Action</th>
<th>management/agent</th>
<th>impl. details</th>
</tr>
<tr>
<td>Create ROOT volume from ISO</td>
<td>create VM from ISO</td>
<td>management</td>
<td>createVolumeAsync</td>
</tr>
<tr>
<td>Create ROOT volume from Template</td>
<td>create VM from Template</td>
<td>management + agent</td>
<td>copyAsync (T => T, T => V)</td>
</tr>
<tr>
<td>Create DATA volume</td>
<td>create Volume</td>
<td>management</td>
<td>createVolumeAsync</td>
</tr>
<tr>
<td>Attach ROOT/DATA volume</td>
<td>start VM (+attach/detach Volume)</td>
<td>agent</td>
<td>connectPhysicalDisk</td>
</tr>
<tr>
<td>Detach ROOT/DATA volume</td>
<td>stop VM</td>
<td>agent</td>
<td>disconnectPhysicalDiskByPath</td>
</tr>
<tr>
<td>&nbsp;</td>
<td>Migrate VM</td>
<td>agent</td>
<td>attach + detach</td>
</tr>
<tr>
<td>Delete ROOT volume</td>
<td>destroy VM (expunge)</td>
<td>management</td>
<td>deleteAsync</td>
</tr>
<tr>
<td>Delete DATA volume</td>
<td>delete Volume (detached)</td>
<td>management</td>
<td>deleteAsync</td>
</tr>
<tr>
<td>Create ROOT/DATA volume snapshot</td>
<td>snapshot volume</td>
<td>management + agent</td>
<td>takeSnapshot + copyAsync (S => S)</td>
</tr>
<tr>
<td>Create volume from snapshot</td>
<td>create volume from snapshot</td>
<td>management + agent(?)</td>
<td>copyAsync (S => V)</td>
</tr>
<tr>
<td>Create TEMPLATE from ROOT volume</td>
<td>create template from volume</td>
<td>management + agent</td>
<td>copyAsync (V => T)</td>
</tr>
<tr>
<td>Create TEMPLATE from snapshot</td>
<td>create template from snapshot</td>
<td>SECONDARY STORAGE</td>
<td>&nbsp;</td>
</tr>
<tr>
<td>Download volume</td>
<td>download volume</td>
<td>management + agent</td>
<td>copyAsync (V => V)</td>
</tr>
<tr>
<td>Revert ROOT/DATA volume to snapshot</td>
<td>revert to snapshot</td>
<td>management</td>
<td>revertSnapshot</td>
</tr>
<tr>
<td>(Live) resize ROOT/DATA volume</td>
<td>resize volume</td>
<td>management + agent</td>
<td>resize + StorPoolResizeCmd</td>
</tr>
<tr>
<td>Delete SNAPSHOT (ROOT/DATA)</td>
<td>delete snapshot</td>
<td>management</td>
<td>StorPoolSnapshotStrategy</td>
</tr>
<tr>
<td>Delete TEMPLATE</td>
<td>delete template</td>
<td>agent</td>
<td>deletePhysicalDisk</td>
</tr>
<tr>
<td>migrate VM/volume</td>
<td>migrate VM/volume to another storage</td>
<td>management/management + agent</td>
<td>copyAsync (V => V)</td>
</tr>
<tr>
<td>VM snapshot</td>
<td>group snapshot of VM's disks</td>
<td>management</td>
<td>StorPoolVMSnapshotStrategy takeVMSnapshot</td>
</tr>
<tr>
<td>revert VM snapshot</td>
<td>revert group snapshot of VM's disks</td>
<td>management</td>
<td>StorPoolVMSnapshotStrategy revertVMSnapshot</td>
</tr>
<tr>
<td>delete VM snapshot</td>
<td>delete group snapshot of VM's disks</td>
<td>management</td>
<td>StorPoolVMSnapshotStrategy deleteVMSnapshot</td>
</tr>
<tr>
<td>VM vc_policy tag</td>
<td>vc_policy tag for all disks attached to VM</td>
<td>management</td>
<td>StorPoolCreateTagsCmd</td>
</tr>
<tr>
<td>delete VM vc_policy tag</td>
<td>remove vc_policy tag for all disks attached to VM</td>
<td>management</td>
<td>StorPoolDeleteTagsCmd</td>
</tr>
</table>
>NOTE: When using multicluster, set the value of StorPool's SP_CLUSTER_ID in the "sp.cluster.id" setting for each CloudStack cluster.
>
>NOTE: Secondary storage can be bypassed by setting the configuration option "sp.bypass.secondary.storage" to true.
In this case only snapshots will not be downloaded to secondary storage.
>
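Setting these options from CloudMonkey might look like the sketch below (assuming the standard `update configuration` API; the cluster UUID is a placeholder):

```bash
update configuration name=sp.bypass.secondary.storage value=true
update configuration name=sp.cluster.id value=<SP_CLUSTER_ID> clusterid=<cloudstack-cluster-uuid>
```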
### Creating template from snapshot
#### If bypass option is enabled
The snapshot exists only on PRIMARY (StorPool) storage. From this snapshot a template will be created on SECONDARY.
#### If bypass option is disabled
TODO: perhaps we should not use the CloudStack functionality here, but rather the one used when the bypass option is enabled.
This is independent of StorPool, as the snapshots exist on SECONDARY.
### Creating ROOT volume from templates
When creating the first volume based on a given template, if a snapshot of the template does not exist on StorPool, it is first downloaded (cached) to PRIMARY storage.
This is mapped to a StorPool snapshot, so creating successive volumes from the same template does not incur additional
copying of data to PRIMARY storage.
This cached snapshot is garbage collected when the original template is deleted from CloudStack. This cleanup is done
by a background task in CloudStack.
### Creating a ROOT volume from an ISO image
We just need to create the volume. The ISO installation is handled by CloudStack.
### Creating a DATA volume
DATA volumes are created by CloudStack the first time they are attached to a VM.
### Creating volume from snapshot
We use the fact that the snapshot already exists on PRIMARY, so no data is copied. Snapshots are copied from SECONDARY to StorPool PRIMARY
only when there is no corresponding StorPool snapshot.
### Resizing volumes
We need to send a resize command to the agent on which the VM with the attached volume is running, so that
the resize is visible to the VM.
### Creating snapshots
The snapshot is first created on the PRIMARY storage (i.e. StorPool), then backed up to SECONDARY storage
(tested with NFS secondary) if the bypass option is not enabled. The original StorPool snapshot is kept, so that creating volumes from the snapshot does not copy
the data to PRIMARY again. When the snapshot is deleted from CloudStack, so is the corresponding StorPool snapshot.
Currently snapshots are taken in RAW format.
### Reverting volume to snapshot
This is handled by StorPool.
### Migrating volumes to other Storage pools
Tested with storage pools on NFS only.
### Virtual Machine Snapshot/Group Snapshot
StorPool supports consistent snapshots of volumes attached to a virtual machine.
### BW/IOPS limitations
Max IOPS limits are enforced on StorPool volumes with the help of custom service offerings, by adding IOPS limits to the
corresponding system disk offering.
CloudStack has no way to specify a max BW limit. Whether users need to specify max BW at all, or whether a max IOPS limit alone is sufficient, remains an open question.
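As an illustration of the IOPS workaround, a capped compute offering could be created via CloudMonkey roughly as follows (parameter names from the standard `create serviceoffering` API; the values are placeholders):

```bash
create serviceoffering name=sp-capped displaytext="2 vCPU, 2 GB, 1000 IOPS cap" \
    cpunumber=2 cpuspeed=1000 memory=2048 \
    miniops=1000 maxiops=1000 storagetype=shared tags=storpool
```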

@@ -0,0 +1,68 @@
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
you under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-volume-storpool</artifactId>
<name>Apache CloudStack Plugin - Storage Volume StorPool provider</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.17.0.0-SNAPSHOT</version>
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-volume</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-snapshot</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-hypervisor-kvm</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-orchestration</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>4.4</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>true</skipTests>
</configuration>
<executions>
<execution>
<phase>integration-test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

@@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import com.cloud.agent.api.to.DataTO;
public class StorPoolBackupSnapshotCommand extends StorPoolCopyCommand<SnapshotObjectTO, SnapshotObjectTO> {
public StorPoolBackupSnapshotCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence) {
super(srcTO, dstTO, timeout, executeInSequence);
}
}

@@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import com.cloud.agent.api.to.DataTO;
public class StorPoolBackupTemplateFromSnapshotCommand extends StorPoolCopyCommand<DataTO, TemplateObjectTO> {
public StorPoolBackupTemplateFromSnapshotCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence) {
super(srcTO, dstTO, timeout, executeInSequence);
}
}

@@ -0,0 +1,60 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import com.cloud.agent.api.to.DataTO;
public class StorPoolCopyCommand<S extends DataTO, D extends DataTO> extends StorageSubSystemCommand {
private S sourceTO;
private D destinationTO;
private boolean executeInSequence = false;
public StorPoolCopyCommand(final DataTO sourceTO, final DataTO destinationTO, final int timeout, final boolean executeInSequence) {
super();
this.sourceTO = (S)sourceTO;
this.destinationTO = (D)destinationTO;
setWait(timeout);
this.executeInSequence = executeInSequence;
}
public S getSourceTO() {
return sourceTO;
}
public D getDestinationTO() {
return destinationTO;
}
public int getWaitInMillSeconds() {
return getWait() * 1000;
}
@Override
public boolean executeInSequence() {
return executeInSequence;
}
@Override
public void setExecuteInSequence(final boolean inSeq) {
executeInSequence = inSeq;
}
}

@@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import com.cloud.agent.api.to.DataTO;
public class StorPoolCopyVolumeToSecondaryCommand extends StorPoolCopyCommand<VolumeObjectTO, VolumeObjectTO> {
public StorPoolCopyVolumeToSecondaryCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence) {
super(srcTO, dstTO, timeout, executeInSequence);
}
}

@@ -0,0 +1,37 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api.storage;
import com.cloud.agent.api.to.DataTO;
public class StorPoolDownloadTemplateCommand extends StorPoolCopyCommand<DataTO, DataTO> {
protected String objectType;
public StorPoolDownloadTemplateCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence, String objectType) {
super(srcTO, dstTO, timeout, executeInSequence);
this.objectType = objectType;
}
public String getObjectType() {
return objectType;
}
public void setObjectType(String objectType) {
this.objectType = objectType;
}
}

@@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import com.cloud.agent.api.to.DataTO;
public class StorPoolDownloadVolumeCommand extends StorPoolCopyCommand<VolumeObjectTO, VolumeObjectTO> {
public StorPoolDownloadVolumeCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence) {
super(srcTO, dstTO, timeout, executeInSequence);
}
}

@@ -0,0 +1,55 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import java.util.ArrayList;
import java.util.List;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.to.VirtualMachineTO;
public class StorPoolMigrateWithVolumesCommand extends MigrateCommand {
private List<MigrateDiskInfo> migrateDiskInfoList = new ArrayList<>();
public StorPoolMigrateWithVolumesCommand() {
super();
}
public StorPoolMigrateWithVolumesCommand(String vmName, String destIp, boolean isWindows, VirtualMachineTO vmTO,
boolean executeInSequence) {
super(vmName, destIp, isWindows, vmTO, executeInSequence);
}
public List<MigrateDiskInfo> getMigrateDiskInfoList() {
return migrateDiskInfoList;
}
public void setMigrateDiskInfoList(List<MigrateDiskInfo> migrateDiskInfoList) {
this.migrateDiskInfoList = migrateDiskInfoList;
}
public boolean isMigrateStorageManaged() {
return true;
}
public boolean isMigrateNonSharedInc() {
return false;
}
}

@@ -0,0 +1,94 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.storage.template.TemplateProp;
public class StorPoolModifyStoragePoolAnswer extends Answer{
private StoragePoolInfo poolInfo;
private Map<String, TemplateProp> templateInfo;
private String localDatastoreName;
private String poolType;
private List<ModifyStoragePoolAnswer> datastoreClusterChildren = new ArrayList<>();
private String clusterId;
public StorPoolModifyStoragePoolAnswer(StorPoolModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map<String, TemplateProp> tInfo, String clusterId) {
super(cmd);
result = true;
poolInfo = new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes);
templateInfo = tInfo;
this.clusterId = clusterId;
}
public StorPoolModifyStoragePoolAnswer(String errMsg) {
super(null, false, errMsg);
}
public void setPoolInfo(StoragePoolInfo poolInfo) {
this.poolInfo = poolInfo;
}
public StoragePoolInfo getPoolInfo() {
return poolInfo;
}
public void setTemplateInfo(Map<String, TemplateProp> templateInfo) {
this.templateInfo = templateInfo;
}
public Map<String, TemplateProp> getTemplateInfo() {
return templateInfo;
}
public void setLocalDatastoreName(String localDatastoreName) {
this.localDatastoreName = localDatastoreName;
}
public String getLocalDatastoreName() {
return localDatastoreName;
}
public String getPoolType() {
return poolType;
}
public void setPoolType(String poolType) {
this.poolType = poolType;
}
public List<ModifyStoragePoolAnswer> getDatastoreClusterChildren() {
return datastoreClusterChildren;
}
public void setDatastoreClusterChildren(List<ModifyStoragePoolAnswer> datastoreClusterChildren) {
this.datastoreClusterChildren = datastoreClusterChildren;
}
public String getClusterId() {
return clusterId;
}
}

@@ -0,0 +1,36 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.storage.StoragePool;
public class StorPoolModifyStoragePoolCommand extends ModifyStoragePoolCommand {
private String volumeName;
public StorPoolModifyStoragePoolCommand(boolean add, StoragePool pool, String volumeName) {
super(add, pool);
this.volumeName = volumeName;
}
public String getVolumeName() {
return volumeName;
}
}

@@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.agent.api.storage;
import com.cloud.agent.api.to.StorageFilerTO;
public class StorPoolResizeVolumeCommand extends ResizeVolumeCommand {
protected boolean isAttached;
protected StorPoolResizeVolumeCommand() {
super();
}
public StorPoolResizeVolumeCommand(String path, StorageFilerTO pool, Long currentSize, Long newSize, boolean shrinkOk, String vmInstance, boolean isAttached) {
super(path, pool, currentSize, newSize, shrinkOk, vmInstance);
this.isAttached = isAttached;
}
public boolean isAttached() {
return isAttached;
}
}

@@ -0,0 +1,109 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
import java.io.File;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import com.cloud.agent.api.storage.StorPoolBackupSnapshotCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
@ResourceWrapper(handles = StorPoolBackupSnapshotCommand.class)
public final class StorPoolBackupSnapshotCommandWrapper extends CommandWrapper<StorPoolBackupSnapshotCommand, CopyCmdAnswer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(StorPoolBackupSnapshotCommandWrapper.class);
@Override
public CopyCmdAnswer execute(final StorPoolBackupSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
String srcPath = null;
KVMStoragePool secondaryPool = null;
try {
final SnapshotObjectTO src = cmd.getSourceTO();
final SnapshotObjectTO dst = cmd.getDestinationTO();
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
SP_LOG("StorpoolBackupSnapshotCommandWrapper.execute: src=" + src.getPath() + "dst=" + dst.getPath());
StorPoolStorageAdaptor.attachOrDetachVolume("attach", "snapshot", src.getPath());
srcPath = src.getPath();
final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
final DataStoreTO dstDataStore = dst.getDataStore();
if (!(dstDataStore instanceof NfsTO)) {
return new CopyCmdAnswer("Backup Storpool snapshot: Only NFS secondary supported at present!");
}
secondaryPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl());
final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath();
FileUtils.forceMkdir(new File(dstDir));
final String dstPath = dstDir + File.separator + dst.getName();
final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.QCOW2);
final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
qemu.convert(srcFile, dstFile);
SP_LOG("StorpoolBackupSnapshotCommandWrapper srcFileFormat=%s, dstFileFormat=%s", srcFile.getFormat(), dstFile.getFormat());
final File snapFile = new File(dstPath);
final long size = snapFile.exists() ? snapFile.length() : 0;
final SnapshotObjectTO snapshot = new SnapshotObjectTO();
snapshot.setPath(dst.getPath() + File.separator + dst.getName());
snapshot.setPhysicalSize(size);
return new CopyCmdAnswer(snapshot);
} catch (final Exception e) {
final String error = String.format("Failed to backup snapshot with id [%s] with a pool %s, due to %s", cmd.getSourceTO().getId(), cmd.getSourceTO().getDataStore().getUuid(), e.getMessage());
SP_LOG(error);
s_logger.debug(error);
return new CopyCmdAnswer(cmd, e);
} finally {
if (srcPath != null) {
StorPoolStorageAdaptor.attachOrDetachVolume("detach", "snapshot", srcPath);
}
if (secondaryPool != null) {
try {
secondaryPool.delete();
} catch (final Exception e) {
s_logger.debug("Failed to delete secondary storage", e);
}
}
}
}
}

@@ -0,0 +1,161 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.hypervisor.kvm.resource.wrapper;
import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.util.HashMap;
import java.util.Map;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageLayer;
import com.cloud.storage.template.Processor;
import com.cloud.storage.template.Processor.FormatInfo;
import com.cloud.storage.template.QCOW2Processor;
import com.cloud.storage.template.TemplateLocation;
import com.cloud.storage.template.TemplateProp;
@ResourceWrapper(handles = StorPoolBackupTemplateFromSnapshotCommand.class)
public class StorPoolBackupTemplateFromSnapshotCommandWrapper extends CommandWrapper<StorPoolBackupTemplateFromSnapshotCommand, CopyCmdAnswer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(StorPoolBackupTemplateFromSnapshotCommandWrapper.class);
@Override
public CopyCmdAnswer execute(final StorPoolBackupTemplateFromSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
String srcPath = null;
KVMStoragePool secondaryPool = null;
String objectType = cmd.getSourceTO().getObjectType().toString().toLowerCase();
try {
final DataTO src = cmd.getSourceTO();
final TemplateObjectTO dst = cmd.getDestinationTO();
String name = null;
String volumeFormatExtension = null;
if (src instanceof SnapshotObjectTO) {
name = ((SnapshotObjectTO) src).getName();
volumeFormatExtension = ((SnapshotObjectTO) src).getVolume().getFormat().getFileExtension();
} else if (src instanceof VolumeObjectTO) {
name = ((VolumeObjectTO) src).getName();
volumeFormatExtension = ((VolumeObjectTO) src).getFormat().getFileExtension();
} else {
return new CopyCmdAnswer("Backup of a template is not supported for data object: " + src.getObjectType() );
}
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
StorageLayer storage = libvirtComputingResource.getStorage();
Processor processor = new QCOW2Processor();
String _tmpltpp = "template.properties";
SP_LOG("StorpoolBackupTemplateFromSnapshotCommandWrapper.execute: src=" + src.getPath() + "dst=" + dst.getPath());
StorPoolStorageAdaptor.attachOrDetachVolume("attach", objectType, src.getPath());
srcPath = src.getPath();
final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
final DataStoreTO dstDataStore = dst.getDataStore();
if (!(dstDataStore instanceof NfsTO)) {
return new CopyCmdAnswer("Backup Storpool snapshot: Only NFS secondary supported at present!");
}
secondaryPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl());
final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath();
FileUtils.forceMkdir(new File(dstDir));
String nameWithExtension = name + "." + volumeFormatExtension;
final String dstPath = dstDir + File.separator + nameWithExtension;
final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.QCOW2);
final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
qemu.convert(srcFile, dstFile);
storage.create(dstDir, _tmpltpp);
String metaFileName = dstDir + File.separator + _tmpltpp;
File metaFile = new File(metaFileName);
try ( FileWriter writer = new FileWriter(metaFile);
BufferedWriter bufferWriter = new BufferedWriter(writer);) {
bufferWriter.write("uniquename=" + dst.getName());
bufferWriter.write("\n");
bufferWriter.write("filename=" + nameWithExtension);
}
Map<String, Object> params = new HashMap<String, Object>();
params.put(StorageLayer.InstanceConfigKey, storage);
processor.configure("template processor", params);
FormatInfo info = processor.process(dstDir, null, name);
TemplateLocation loc = new TemplateLocation(storage, dstDir);
loc.create(1, true, dst.getName());
loc.addFormat(info);
loc.save();
TemplateProp prop = loc.getTemplateInfo();
final TemplateObjectTO template = new TemplateObjectTO();
template.setPath(dst.getPath() + File.separator + nameWithExtension);
template.setFormat(ImageFormat.QCOW2);
template.setSize(prop.getSize());
template.setPhysicalSize(prop.getPhysicalSize());
return new CopyCmdAnswer(template);
} catch (final Exception e) {
final String error = "failed to backup snapshot: " + e.getMessage();
SP_LOG(error);
s_logger.debug(error);
return new CopyCmdAnswer(cmd, e);
} finally {
if (srcPath != null) {
StorPoolStorageAdaptor.attachOrDetachVolume("detach", objectType, srcPath);
}
if (secondaryPool != null) {
try {
secondaryPool.delete();
} catch (final Exception e) {
s_logger.debug("Failed to delete secondary storage", e);
}
}
}
}
}

@@ -0,0 +1,124 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
import java.io.File;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import com.cloud.agent.api.storage.StorPoolCopyVolumeToSecondaryCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
@ResourceWrapper(handles = StorPoolCopyVolumeToSecondaryCommand.class)
public final class StorPoolCopyVolumeToSecondaryCommandWrapper extends CommandWrapper<StorPoolCopyVolumeToSecondaryCommand, CopyCmdAnswer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(StorPoolCopyVolumeToSecondaryCommandWrapper.class);
@Override
public CopyCmdAnswer execute(final StorPoolCopyVolumeToSecondaryCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
String srcPath = null;
KVMStoragePool secondaryPool = null;
try {
final VolumeObjectTO src = cmd.getSourceTO();
final VolumeObjectTO dst = cmd.getDestinationTO();
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
final String destVolumePath = dst.getPath();
SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: src=" + src.getPath() + "dst=" + dst.getPath());
StorPoolStorageAdaptor.attachOrDetachVolume("attach", "snapshot", src.getPath());
srcPath = src.getPath();
final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
final DataStoreTO dstDataStore = dst.getDataStore();
final KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr();
SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: KVMStoragePoolManager " + poolMgr);
KVMStoragePool destPool;
if( dstDataStore instanceof NfsTO ) {
destPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl());
destPool.createFolder(destVolumePath);
storagePoolMgr.deleteStoragePool(destPool.getType(), destPool.getUuid());
destPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl() + File.separator + destVolumePath);
SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: Nfs destPool=%s ",destPool);
} else if( dstDataStore instanceof PrimaryDataStoreTO ) {
PrimaryDataStoreTO primaryDst = (PrimaryDataStoreTO)dstDataStore;
destPool = poolMgr.getStoragePool(primaryDst.getPoolType(), dstDataStore.getUuid());
SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: not Nfs destPool=%s " ,destPool);
} else {
return new CopyCmdAnswer("Don't know how to copy to " + dstDataStore.getClass().getName() + ", " + dst.getPath() );
}
SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: dstName=%s, dstProvisioningType=%s, srcSize=%s, dstUUID=%s, srcUUID=%s " ,dst.getName(), dst.getProvisioningType(), src.getSize(),dst.getUuid(), src.getUuid());
KVMPhysicalDisk newDisk = destPool.createPhysicalDisk(dst.getUuid(), dst.getProvisioningType(), src.getSize());
SP_LOG("NewDisk path=%s, uuid=%s ", newDisk.getPath(), dst.getUuid());
String destPath = newDisk.getPath();
newDisk.setPath(dst.getUuid());
PhysicalDiskFormat destFormat = newDisk.getFormat();
SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: KVMPhysicalDisk name=%s, format=%s, path=%s, destinationPath=%s " , newDisk.getName(), newDisk.getFormat(), newDisk.getPath(), destPath);
QemuImgFile destFile = new QemuImgFile(destPath, destFormat);
QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
qemu.convert(srcFile, destFile);
final File file = new File(destPath);
final long size = file.exists() ? file.length() : 0;
dst.setPath(destVolumePath + File.separator + dst.getUuid());
dst.setSize(size);
return new CopyCmdAnswer(dst);
} catch (final Exception e) {
final String error = "Failed to copy volume to secondary storage: " + e.getMessage();
s_logger.debug(error);
return new CopyCmdAnswer(error);
} finally {
if (srcPath != null) {
StorPoolStorageAdaptor.attachOrDetachVolume("detach", "snapshot", srcPath);
}
if (secondaryPool != null) {
try {
SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: secondaryPool=%s " , secondaryPool);
secondaryPool.delete();
} catch (final Exception e) {
s_logger.debug("Failed to delete secondary storage", e);
}
}
}
}
}

@@ -0,0 +1,134 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
import java.io.File;
import java.util.List;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import com.cloud.agent.api.storage.StorPoolDownloadTemplateCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
@ResourceWrapper(handles = StorPoolDownloadTemplateCommand.class)
public final class StorPoolDownloadTemplateCommandWrapper extends CommandWrapper<StorPoolDownloadTemplateCommand, CopyCmdAnswer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(StorPoolDownloadTemplateCommandWrapper.class);
@Override
public CopyCmdAnswer execute(final StorPoolDownloadTemplateCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
String dstPath = null;
KVMStoragePool secondaryPool = null;
DataTO src = cmd.getSourceTO();
DataTO dst = cmd.getDestinationTO();
try {
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
SP_LOG("StorpoolDownloadTemplateCommandWrapper.execute: src=" + src.getPath() + " dst=" + dst.getPath());
final DataStoreTO srcDataStore = src.getDataStore();
if (!(srcDataStore instanceof NfsTO)) {
return new CopyCmdAnswer("Download template to Storpool: Only NFS secondary supported at present!");
}
final NfsTO nfsImageStore = (NfsTO)srcDataStore;
final String tmplturl = nfsImageStore.getUrl() + File.separator + src.getPath();
final int index = tmplturl.lastIndexOf("/");
final String mountpoint = tmplturl.substring(0, index);
String tmpltname = null;
if (index < tmplturl.length() - 1) {
tmpltname = tmplturl.substring(index + 1);
}
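// Example split (hypothetical URL): tmplturl = "nfs://host/export/template/201/tmpl.qcow2"
// -> mountpoint = "nfs://host/export/template/201", tmpltname = "tmpl.qcow2"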
secondaryPool = storagePoolMgr.getStoragePoolByURI(mountpoint);
KVMPhysicalDisk srcDisk = null;
if (tmpltname == null) {
secondaryPool.refresh();
final List<KVMPhysicalDisk> disks = secondaryPool.listPhysicalDisks();
if (CollectionUtils.isEmpty(disks)) {
SP_LOG("Failed to get volumes from pool: " + secondaryPool.getUuid());
return new CopyCmdAnswer("Failed to get volumes from pool: " + secondaryPool.getUuid());
}
for (final KVMPhysicalDisk disk : disks) {
if (disk.getName().endsWith("qcow2")) {
srcDisk = disk;
break;
}
}
} else {
srcDisk = secondaryPool.getPhysicalDisk(tmpltname);
}
if (srcDisk == null) {
SP_LOG("Failed to get template from pool: " + secondaryPool.getUuid());
return new CopyCmdAnswer("Failed to get template from pool: " + secondaryPool.getUuid());
}
SP_LOG("got src path: " + srcDisk.getPath() + " srcSize " + srcDisk.getVirtualSize());
final QemuImgFile srcFile = new QemuImgFile(srcDisk.getPath(), srcDisk.getFormat());
final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
StorPoolStorageAdaptor.resize(Long.toString(srcDisk.getVirtualSize()), dst.getPath());
dstPath = dst.getPath();
StorPoolStorageAdaptor.attachOrDetachVolume("attach", cmd.getObjectType(), dstPath);
final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.RAW);
qemu.convert(srcFile, dstFile);
return new CopyCmdAnswer(dst);
} catch (final Exception e) {
final String error = "Failed to copy template to primary: " + e.getMessage();
s_logger.debug(error, e);
return new CopyCmdAnswer(cmd, e);
} finally {
if (dstPath != null) {
StorPoolStorageAdaptor.attachOrDetachVolume("detach", cmd.getObjectType(), dstPath);
}
if (secondaryPool != null) {
try {
secondaryPool.delete();
} catch (final Exception e) {
s_logger.debug("Failed to delete secondary storage", e);
}
}
}
}
}

View File

@ -0,0 +1,162 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
import java.util.List;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import com.cloud.agent.api.storage.StorPoolDownloadVolumeCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
@ResourceWrapper(handles = StorPoolDownloadVolumeCommand.class)
public final class StorPoolDownloadVolumeCommandWrapper extends CommandWrapper<StorPoolDownloadVolumeCommand, CopyCmdAnswer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(StorPoolDownloadVolumeCommandWrapper.class);
@Override
public CopyCmdAnswer execute(final StorPoolDownloadVolumeCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
String dstPath = null;
KVMStoragePool secondaryPool = null;
try {
final VolumeObjectTO src = cmd.getSourceTO();
final VolumeObjectTO dst = cmd.getDestinationTO();
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
SP_LOG("StorpoolDownloadVolumeCommandWrapper.execute: src=" + src.getPath() + " srcName=" + src.getName() + " dst=" + dst.getPath());
final DataStoreTO srcDataStore = src.getDataStore();
KVMPhysicalDisk srcDisk = null;
if (srcDataStore instanceof NfsTO) {
SP_LOG("StorPoolDownloadVolumeCommandWrapper.execute: src is NfsTO");
final String tmplturl = srcDataStore.getUrl() + srcDataStore.getPathSeparator() + src.getPath();
final int index = tmplturl.lastIndexOf("/");
final String mountpoint = tmplturl.substring(0, index);
String tmpltname = null;
if (index < tmplturl.length() - 1) {
tmpltname = tmplturl.substring(index + 1);
}
secondaryPool = storagePoolMgr.getStoragePoolByURI(mountpoint);
if (tmpltname == null) {
secondaryPool.refresh();
final List<KVMPhysicalDisk> disks = secondaryPool.listPhysicalDisks();
if (disks == null || disks.isEmpty()) {
SP_LOG("Failed to get volumes from pool: " + secondaryPool.getUuid());
return new CopyCmdAnswer("Failed to get volumes from pool: " + secondaryPool.getUuid());
}
for (final KVMPhysicalDisk disk : disks) {
if (disk.getName().endsWith("qcow2")) {
srcDisk = disk;
break;
}
}
} else {
srcDisk = secondaryPool.getPhysicalDisk(tmpltname);
}
} else if (srcDataStore instanceof PrimaryDataStoreTO) {
SP_LOG("SrcDisk is on primary storage");
PrimaryDataStoreTO primarySrc = (PrimaryDataStoreTO)srcDataStore;
SP_LOG("StorPoolDownloadVolumeCommandWrapper.execute: primarySrcPoolType=%s, uuid=%s", primarySrc.getPoolType(), primarySrc.getUuid());
final KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr();
srcDisk = poolMgr.getPhysicalDisk(primarySrc.getPoolType(), srcDataStore.getUuid(), src.getPath());
SP_LOG("PhysicalDisk: disk=%s", srcDisk);
} else {
return new CopyCmdAnswer("Don't know how to copy from " + srcDataStore.getClass().getName() + ", " + src.getPath() );
}
if (srcDisk == null) {
SP_LOG("Failed to get src volume");
return new CopyCmdAnswer("Failed to get src volume");
}
SP_LOG("got src path: " + srcDisk.getPath() + " srcSize " + srcDisk.getVirtualSize());
String srcPath = null;
boolean isRBDPool = srcDisk.getPool().getType() == StoragePoolType.RBD;
if (isRBDPool) {
KVMStoragePool srcPool = srcDisk.getPool();
String rbdDestPath = srcPool.getSourceDir() + "/" + srcDisk.getName();
srcPath = KVMPhysicalDisk.RBDStringBuilder(srcPool.getSourceHost(),
srcPool.getSourcePort(),
srcPool.getAuthUserName(),
srcPool.getAuthSecret(),
rbdDestPath);
} else {
srcPath = srcDisk.getPath();
}
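// Build a qemu-accessible RBD source string (rbd:<pool>/<image>:...) so qemu-img
// can read the source volume directly from Ceph.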
final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
StorPoolStorageAdaptor.resize(Long.toString(srcDisk.getVirtualSize()), dst.getPath());
dstPath = dst.getPath();
StorPoolStorageAdaptor.attachOrDetachVolume("attach", "volume", dstPath);
final QemuImgFile dstFile = new QemuImgFile(dstPath, srcFile.getFormat());
SP_LOG("SRC format=%s, DST format=%s",srcFile.getFormat(), dstFile.getFormat());
qemu.convert(srcFile, dstFile);
SP_LOG("StorpoolDownloadVolumeCommandWrapper VolumeObjectTO format=%s, hypervisor=%s", dst.getFormat(), dst.getHypervisorType());
if (isRBDPool) {
dst.setFormat(ImageFormat.QCOW2);
}
return new CopyCmdAnswer(dst);
} catch (final Exception e) {
final String error = "Failed to copy volume to primary: " + e.getMessage();
SP_LOG(error);
s_logger.debug(error, e);
return new CopyCmdAnswer(cmd, e);
} finally {
if (dstPath != null) {
StorPoolStorageAdaptor.attachOrDetachVolume("detach", "volume", dstPath);
}
if (secondaryPool != null) {
try {
secondaryPool.delete();
} catch (final Exception e) {
s_logger.debug("Failed to delete secondary storage", e);
}
}
}
}
}

View File

@ -0,0 +1,146 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.hypervisor.kvm.resource.wrapper;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.log4j.Logger;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.StorPoolModifyStoragePoolAnswer;
import com.cloud.agent.api.storage.StorPoolModifyStoragePoolCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.template.TemplateProp;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
@ResourceWrapper(handles = StorPoolModifyStoragePoolCommand.class)
public final class StorPoolModifyStorageCommandWrapper extends CommandWrapper<StorPoolModifyStoragePoolCommand, Answer, LibvirtComputingResource> {
private static final Logger log = Logger.getLogger(StorPoolModifyStorageCommandWrapper.class);
@Override
public Answer execute(final StorPoolModifyStoragePoolCommand command, final LibvirtComputingResource libvirtComputingResource) {
String clusterId = getSpClusterId();
if (clusterId == null) {
log.debug(String.format("Could not get StorPool cluster id for a command $s", command.getClass()));
return new Answer(command, false, "spNotFound");
}
try {
String result = attachOrDetachVolume("attach", "volume", command.getVolumeName());
if (result != null) {
return new Answer(command, false, result);
}
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
final KVMStoragePool storagepool =
storagePoolMgr.createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool()
.getUserInfo(), command.getPool().getType());
if (storagepool == null) {
log.debug(String.format("Did not find a storage pool [%s]", command.getPool().getId()));
return new Answer(command, false, String.format("Failed to create storage pool [%s]", command.getPool().getId()));
}
final Map<String, TemplateProp> tInfo = new HashMap<>();
return new StorPoolModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, clusterId);
} catch (Exception e) {
log.debug(String.format("Could not modify storage due to %s", e.getMessage()));
return new Answer(command, e);
}
}
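// getSpClusterId parses the output of storpool_confget, which prints one KEY=VALUE
// pair per line. Illustrative (hypothetical) output:
//   SP_API_HTTP_HOST=127.0.0.1
//   SP_API_HTTP_PORT=81
//   SP_CLUSTER_ID=nvda.b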
private String getSpClusterId() {
Script sc = new Script("storpool_confget", 0, log);
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
final String err = sc.execute(parser);
if (err != null) {
log.warn(String.format("Could not execute storpool_confget. Error: %s", err));
StorPoolStorageAdaptor.SP_LOG("Could not execute storpool_confget. Error: %s", err);
return null;
}
for (String line : parser.getLines().split("\n")) {
String[] toks = line.split("=");
if (toks.length != 2) {
continue;
}
if (toks[0].equals("SP_CLUSTER_ID")) {
return toks[1];
}
}
return null;
}
public String attachOrDetachVolume(String command, String type, String volumeUuid) {
final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(volumeUuid, true);
if (name == null) {
return null;
}
String err = null;
Script sc = new Script("storpool", 300000, log);
sc.add("-M");
sc.add("-j");
sc.add(command);
sc.add(type, name);
sc.add("here");
sc.add("onRemoteAttached");
sc.add("export");
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
String res = sc.execute(parser);
if (res != null) {
if (!res.equals(Script.ERR_TIMEOUT)) {
try {
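// With -j the storpool CLI replies in JSON; on failure the payload is expected to
// carry an error object, e.g. (illustrative): {"error":{"name":"objectDoesNotExist","descr":"..."}}
// The loop below extracts error.name as a terser failure reason.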
Set<Entry<String, JsonElement>> obj2 = new JsonParser().parse(res).getAsJsonObject().entrySet();
for (Entry<String, JsonElement> entry : obj2) {
if (entry.getKey().equals("error")) {
res = entry.getValue().getAsJsonObject().get("name").getAsString();
}
}
} catch (Exception e) {
// response was not the expected JSON shape; keep the raw CLI output as the error text
}
}
err = String.format("Unable to %s volume %s. Error: %s", command, name, res);
}
if (err != null) {
log.warn(err);
}
return res;
}
}

View File

@ -0,0 +1,98 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import org.apache.log4j.Logger;
import com.cloud.agent.api.storage.ResizeVolumeAnswer;
import com.cloud.agent.api.storage.StorPoolResizeVolumeCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.script.Script;
@ResourceWrapper(handles = StorPoolResizeVolumeCommand.class)
public final class StorPoolResizeVolumeCommandWrapper extends CommandWrapper<StorPoolResizeVolumeCommand, ResizeVolumeAnswer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(StorPoolResizeVolumeCommandWrapper.class);
@Override
public ResizeVolumeAnswer execute(final StorPoolResizeVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
final String volid = command.getPath();
final long newSize = command.getNewSize();
final long currentSize = command.getCurrentSize();
final String vmInstanceName = command.getInstanceName();
final boolean shrinkOk = command.getShrinkOk();
final StorageFilerTO spool = command.getPool();
String volPath = null;
if (currentSize == newSize) {
// nothing to do
s_logger.info("No need to resize volume: current size " + currentSize + " is same as new size " + newSize);
return new ResizeVolumeAnswer(command, true, "success", currentSize);
}
try {
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
KVMStoragePool pool = storagePoolMgr.getStoragePool(spool.getType(), spool.getUuid());
final KVMPhysicalDisk vol = pool.getPhysicalDisk(volid);
final String path = vol.getPath();
volPath = path;
if (!command.isAttached()) {
StorPoolStorageAdaptor.attachOrDetachVolume("attach", "volume", path);
}
final Script resizecmd = new Script(libvirtComputingResource.getResizeVolumePath(), libvirtComputingResource.getCmdsTimeout(), s_logger);
resizecmd.add("-s", String.valueOf(newSize));
resizecmd.add("-c", String.valueOf(currentSize));
resizecmd.add("-p", path);
resizecmd.add("-t", "NOTIFYONLY");
resizecmd.add("-r", String.valueOf(shrinkOk));
resizecmd.add("-v", vmInstanceName);
final String result = resizecmd.execute();
if (result != null) {
return new ResizeVolumeAnswer(command, true, "Resize succeeded, but need reboot to notify guest");
}
/* fetch new size as seen from libvirt, don't want to assume anything */
pool = storagePoolMgr.getStoragePool(spool.getType(), spool.getUuid());
pool.refresh();
final long finalSize = pool.getPhysicalDisk(volid).getVirtualSize();
s_logger.debug("after resize, size reports as " + finalSize + ", requested " + newSize);
return new ResizeVolumeAnswer(command, true, "success", finalSize);
} catch (final Exception e) {
final String error = "Failed to resize volume: " + e.getMessage();
s_logger.debug(error);
return new ResizeVolumeAnswer(command, false, error);
} finally {
if (!command.isAttached() && volPath != null) {
StorPoolStorageAdaptor.attachOrDetachVolume("detach", "volume", volPath);
}
}
}
}

View File

@ -0,0 +1,388 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.log4j.Logger;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.ProvisioningType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
@StorageAdaptorInfo(storagePoolType=StoragePoolType.SharedMountPoint)
public class StorPoolStorageAdaptor implements StorageAdaptor {
public static void SP_LOG(String fmt, Object... args) {
try (PrintWriter spLogFile = new PrintWriter(new BufferedWriter(new FileWriter("/var/log/cloudstack/agent/storpool-agent.log", true)))) {
final String line = String.format(fmt, args);
String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS").format(Calendar.getInstance().getTime());
spLogFile.println(timeStamp + " " + line);
spLogFile.flush();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
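// Example log line produced by SP_LOG (illustrative):
//   2022-04-14 17:12:01,123 StorPoolStorageAdaptor.getStoragePool: uuid=...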
private static final Logger log = Logger.getLogger(StorPoolStorageAdaptor.class);
private static final Map<String, KVMStoragePool> storageUuidToStoragePool = new HashMap<String, KVMStoragePool>();
@Override
public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, StoragePoolType storagePoolType) {
SP_LOG("StorpooolStorageAdaptor.createStoragePool: uuid=%s, host=%s:%d, path=%s, userInfo=%s, type=%s", uuid, host, port, path, userInfo, storagePoolType);
StorPoolStoragePool storagePool = new StorPoolStoragePool(uuid, host, port, storagePoolType, this);
storageUuidToStoragePool.put(uuid, storagePool);
return storagePool;
}
@Override
public KVMStoragePool getStoragePool(String uuid) {
SP_LOG("StorpooolStorageAdaptor.getStoragePool: uuid=%s", uuid);
return storageUuidToStoragePool.get(uuid);
}
@Override
public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
SP_LOG("StorpooolStorageAdaptor.getStoragePool: uuid=%s, refresh=%s", uuid, refreshInfo);
return storageUuidToStoragePool.get(uuid);
}
@Override
public boolean deleteStoragePool(String uuid) {
SP_LOG("StorpooolStorageAdaptor.deleteStoragePool: uuid=%s", uuid);
return storageUuidToStoragePool.remove(uuid) != null;
}
@Override
public boolean deleteStoragePool(KVMStoragePool pool) {
SP_LOG("StorpooolStorageAdaptor.deleteStoragePool: uuid=%s", pool.getUuid());
return deleteStoragePool(pool.getUuid());
}
private static long getDeviceSize(final String devPath) {
SP_LOG("StorpooolStorageAdaptor.getDeviceSize: path=%s", devPath);
if (getVolumeNameFromPath(devPath, true) == null) {
return 0;
}
File file = new File(devPath);
if (!file.exists()) {
return 0;
}
Script sc = new Script("blockdev", 0, log);
sc.add("--getsize64", devPath);
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String res = sc.execute(parser);
if (res != null) {
SP_LOG("Unable to retrieve device size for %s. Res: %s", devPath, res);
log.debug(String.format("Unable to retrieve device size for %s. Res: %s", devPath, res));
return 0;
}
return Long.parseLong(parser.getLine());
}
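// Equivalent shell check (illustrative): blockdev --getsize64 /dev/storpool/<volume>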
private static boolean waitForDeviceSymlink(String devPath) {
final int numTries = 10;
final int sleepTime = 100;
for(int i = 0; i < numTries; i++) {
if (getDeviceSize(devPath) != 0) {
return true;
} else {
try {
Thread.sleep(sleepTime);
} catch (Exception ex) {
// don't do anything
}
}
}
return false;
}
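// Polls for up to ~1 second (10 tries x 100 ms) until the device symlink appears
// and reports a non-zero size.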
public static String getVolumeNameFromPath(final String volumeUuid, boolean tildeNeeded) {
if (volumeUuid.startsWith("/dev/storpool/")) {
return volumeUuid.split("/")[3];
} else if (volumeUuid.startsWith("/dev/storpool-byid/")) {
return tildeNeeded ? "~" + volumeUuid.split("/")[3] : volumeUuid.split("/")[3];
}
return null;
}
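// Illustrative mappings (hypothetical names):
//   "/dev/storpool/vol-1"       -> "vol-1"
//   "/dev/storpool-byid/abc123" -> "~abc123" (tildeNeeded) or "abc123"
//   any other path              -> null (not a StorPool device path)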
public static boolean attachOrDetachVolume(String command, String type, String volumeUuid) {
final String name = getVolumeNameFromPath(volumeUuid, true);
if (name == null) {
return false;
}
SP_LOG("StorpooolStorageAdaptor.attachOrDetachVolume: cmd=%s, type=%s, uuid=%s, name=%s", command, type, volumeUuid, name);
final int numTries = 10;
final int sleepTime = 1000;
String err = null;
for(int i = 0; i < numTries; i++) {
Script sc = new Script("storpool", 0, log);
sc.add("-M");
sc.add(command);
sc.add(type, name);
sc.add("here");
if (command.equals("attach")) {
sc.add("onRemoteAttached");
sc.add("export");
}
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String res = sc.execute(parser);
if (res == null) {
err = null;
break;
}
err = String.format("Unable to %s volume %s. Error: %s", command, name, res);
if (command.equals("detach")) {
try {
Thread.sleep(sleepTime);
} catch (Exception ex) {
// don't do anything
}
} else {
break;
}
}
if (err != null) {
SP_LOG(err);
log.warn(err);
throw new CloudRuntimeException(err);
}
if (command.equals("attach")) {
return waitForDeviceSymlink(volumeUuid);
} else {
return true;
}
}
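// Resulting CLI invocations (illustrative):
//   attach: storpool -M attach volume <name> here onRemoteAttached export
//   detach: storpool -M detach volume <name> here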
public static boolean resize(String newSize, String volumeUuid) {
final String name = getVolumeNameFromPath(volumeUuid, true);
if (name == null) {
return false;
}
SP_LOG("StorPoolStorageAdaptor.resize: size=%s, uuid=%s, name=%s", newSize, volumeUuid, name);
Script sc = new Script("storpool", 0, log);
sc.add("-M");
sc.add("volume");
sc.add(name);
sc.add("update");
sc.add("size");
sc.add(newSize);
sc.add("shrinkOk");
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String res = sc.execute(parser);
if (res == null) {
return true;
}
String err = String.format("Unable to resize volume %s. Error: %s", name, res);
SP_LOG(err);
log.warn(err);
throw new CloudRuntimeException(err);
}
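// Resulting CLI invocation (illustrative):
//   storpool -M volume <name> update size <newSize> shrinkOk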
@Override
public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
SP_LOG("StorpooolStorageAdaptor.getPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool);
log.debug(String.format("getPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool));
final long deviceSize = getDeviceSize(volumeUuid);
KVMPhysicalDisk physicalDisk = new KVMPhysicalDisk(volumeUuid, volumeUuid, pool);
physicalDisk.setFormat(PhysicalDiskFormat.RAW);
physicalDisk.setSize(deviceSize);
physicalDisk.setVirtualSize(deviceSize);
return physicalDisk;
}
@Override
public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map<String, String> details) {
SP_LOG("StorpooolStorageAdaptor.connectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool);
log.debug(String.format("connectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool));
return attachOrDetachVolume("attach", "volume", volumeUuid);
}
@Override
public boolean disconnectPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
SP_LOG("StorpooolStorageAdaptor.disconnectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool);
log.debug(String.format("disconnectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool));
return attachOrDetachVolume("detach", "volume", volumeUuid);
}
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
String volumeUuid = volumeToDisconnect.get(DiskTO.UUID);
SP_LOG("StorpooolStorageAdaptor.disconnectPhysicalDisk: map. uuid=%s", volumeUuid);
return attachOrDetachVolume("detach", "volume", volumeUuid);
}
@Override
public boolean disconnectPhysicalDiskByPath(String localPath) {
SP_LOG("StorpooolStorageAdaptor.disconnectPhysicalDiskByPath: localPath=%s", localPath);
log.debug(String.format("disconnectPhysicalDiskByPath: localPath=%s", localPath));
return attachOrDetachVolume("detach", "volume", localPath);
}
// The following operations do not apply to the StorPoolStorageAdaptor.
@Override
public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, KVMStoragePool pool, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
SP_LOG("StorpooolStorageAdaptor.createPhysicalDisk: uuid=%s, pool=%s, format=%s, size=%d", volumeUuid, pool, format, size);
throw new UnsupportedOperationException("Creating a physical disk is not supported.");
}
@Override
public boolean deletePhysicalDisk(String volumeUuid, KVMStoragePool pool, Storage.ImageFormat format) {
// Should only come here when cleaning-up StorPool snapshots associated with CloudStack templates.
SP_LOG("StorpooolStorageAdaptor.deletePhysicalDisk: uuid=%s, pool=%s, format=%s", volumeUuid, pool, format);
final String name = getVolumeNameFromPath(volumeUuid, true);
if (name == null) {
final String err = String.format("StorpooolStorageAdaptor.deletePhysicalDisk: '%s' is not a StorPool volume?", volumeUuid);
SP_LOG(err);
throw new UnsupportedOperationException(err);
}
Script sc = new Script("storpool", 0, log);
sc.add("-M");
sc.add("snapshot", name);
sc.add("delete", name);
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String res = sc.execute(parser);
if (res != null) {
final String err = String.format("Unable to delete StorPool snapshot '%s'. Error: %s", name, res);
SP_LOG(err);
log.warn(err);
throw new UnsupportedOperationException(err);
}
return true; // apparently ignored
}
@Override
public List<KVMPhysicalDisk> listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) {
SP_LOG("StorpooolStorageAdaptor.listPhysicalDisks: uuid=%s, pool=%s", storagePoolUuid, pool);
throw new UnsupportedOperationException("Listing disks is not supported for this configuration.");
}
@Override
public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format,
ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) {
SP_LOG("StorpooolStorageAdaptor.createDiskFromTemplate: template=%s, name=%s, fmt=%s, ptype=%s, size=%d, dst_pool=%s, to=%d",
template, name, format, provisioningType, size, destPool.getUuid(), timeout);
throw new UnsupportedOperationException("Creating a disk from a template is not yet supported for this configuration.");
}
@Override
public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool) {
SP_LOG("StorpooolStorageAdaptor.createTemplateFromDisk: disk=%s, name=%s, fmt=%s, size=%d, dst_pool=%s", disk, name, format, size, destPool.getUuid());
throw new UnsupportedOperationException("Creating a template from a disk is not yet supported for this configuration.");
}
@Override
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) {
SP_LOG("StorpooolStorageAdaptor.copyPhysicalDisk: disk=%s, name=%s, dst_pool=%s, to=%d", disk, name, destPool.getUuid(), timeout);
throw new UnsupportedOperationException("Copying a disk is not supported in this configuration.");
}
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
SP_LOG("StorpooolStorageAdaptor.createDiskFromSnapshot: snap=%s, snap_name=%s, name=%s, dst_pool=%s", snapshot, snapshotName, name, destPool.getUuid());
throw new UnsupportedOperationException("Creating a disk from a snapshot is not supported in this configuration.");
}
@Override
public boolean refresh(KVMStoragePool pool) {
SP_LOG("StorpooolStorageAdaptor.refresh: pool=%s", pool);
return true;
}
@Override
public boolean createFolder(String uuid, String path) {
SP_LOG("StorpooolStorageAdaptor.createFolder: uuid=%s, path=%s", uuid, path);
throw new UnsupportedOperationException("A folder cannot be created in this configuration.");
}
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name,
KVMStoragePool destPool, int timeout) {
SP_LOG("StorpooolStorageAdaptor.createDiskFromSnapshot: snap=%s, snap_name=%s, name=%s, dst_pool=%s", snapshot,
snapshotName, name, destPool.getUuid());
throw new UnsupportedOperationException(
"Creating a disk from a snapshot is not supported in this configuration.");
}
public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name,
PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) {
SP_LOG("StorpooolStorageAdaptor.createDiskFromTemplateBacking: template=%s, name=%s, dst_pool=%s", template,
name, destPool.getUuid());
throw new UnsupportedOperationException(
"Creating a disk from a template is not supported in this configuration.");
}
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool,
boolean isIso) {
SP_LOG("StorpooolStorageAdaptor.createTemplateFromDirectDownloadFile: templateFilePath=%s, dst_pool=%s",
templateFilePath, destPool.getUuid());
throw new UnsupportedOperationException(
"Creating a template from direct download is not supported in this configuration.");
}
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath,
KVMStoragePool destPool, ImageFormat format, int timeout) {
return null;
}
@Override
public boolean createFolder(String uuid, String path, String localPath) {
return false;
}
}

View File

@ -0,0 +1,164 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.StoragePoolType;
public class StorPoolStoragePool implements KVMStoragePool {
private String _uuid;
private String _sourceHost;
private int _sourcePort;
private StoragePoolType _storagePoolType;
private StorageAdaptor _storageAdaptor;
private String _authUsername;
private String _authSecret;
private String _sourceDir;
private String _localPath;
public StorPoolStoragePool(String uuid, String host, int port, StoragePoolType storagePoolType, StorageAdaptor storageAdaptor) {
_uuid = uuid;
_sourceHost = host;
_sourcePort = port;
_storagePoolType = storagePoolType;
_storageAdaptor = storageAdaptor;
}
@Override
public String getUuid() {
return _uuid;
}
@Override
public String getSourceHost() {
return _sourceHost;
}
@Override
public int getSourcePort() {
return _sourcePort;
}
@Override
public long getCapacity() {
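// Reports a fixed pseudo-capacity of 100 PiB (100 * 1024^5 bytes); actual space
// accounting is handled on the StorPool side.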
return 100L*(1024L*1024L*1024L*1024L*1024L);
}
@Override
public long getUsed() {
return 0;
}
@Override
public long getAvailable() {
return 0;
}
@Override
public StoragePoolType getType() {
return _storagePoolType;
}
@Override
public String getAuthUserName() {
return _authUsername;
}
@Override
public String getAuthSecret() {
return _authSecret;
}
@Override
public String getSourceDir() {
return _sourceDir;
}
@Override
public String getLocalPath() {
return _localPath;
}
@Override
public PhysicalDiskFormat getDefaultFormat() {
return PhysicalDiskFormat.RAW;
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
return _storageAdaptor.createPhysicalDisk(name, this, format, provisioningType, size);
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String name, Storage.ProvisioningType provisioningType, long size) {
return _storageAdaptor.createPhysicalDisk(name, this, null, provisioningType, size);
}
@Override
public boolean connectPhysicalDisk(String name, Map<String, String> details) {
return _storageAdaptor.connectPhysicalDisk(name, this, details);
}
@Override
public KVMPhysicalDisk getPhysicalDisk(String volumeUuid) {
return _storageAdaptor.getPhysicalDisk(volumeUuid, this);
}
@Override
public boolean disconnectPhysicalDisk(String volumeUuid) {
return _storageAdaptor.disconnectPhysicalDisk(volumeUuid, this);
}
@Override
public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) {
return _storageAdaptor.deletePhysicalDisk(volumeUuid, this, format);
}
@Override
public List<KVMPhysicalDisk> listPhysicalDisks() {
return _storageAdaptor.listPhysicalDisks(_uuid, this);
}
@Override
public boolean refresh() {
return _storageAdaptor.refresh(this);
}
@Override
public boolean delete() {
return _storageAdaptor.deleteStoragePool(this);
}
@Override
public boolean createFolder(String path) {
return _storageAdaptor.createFolder(_uuid, path);
}
@Override
public boolean isExternalSnapshot() {
return false;
}
public boolean supportsConfigDriveIso() {
return false;
}
}

View File

@ -0,0 +1,323 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.collector;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.TransactionStatus;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
public class StorPoolAbandonObjectsCollector extends ManagerBase implements Configurable {
private static Logger log = Logger.getLogger(StorPoolAbandonObjectsCollector.class);
@Inject
private PrimaryDataStoreDao storagePoolDao;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
private ScheduledExecutorService _volumeTagsUpdateExecutor;
private static final String ABANDON_LOG = "/var/log/cloudstack/management/storpool-abandoned-objects";
static final ConfigKey<Integer> volumeCheckupTagsInterval = new ConfigKey<Integer>("Advanced", Integer.class,
"storpool.volume.tags.checkup", "86400",
"Minimal interval (in seconds) between checks that report StorPool volumes missing from the CloudStack volumes database",
false);
static final ConfigKey<Integer> snapshotCheckupTagsInterval = new ConfigKey<Integer>("Advanced", Integer.class,
"storpool.snapshot.tags.checkup", "86400",
"Minimal interval (in seconds) between checks that report StorPool snapshots missing from the CloudStack snapshots database",
false);
@Override
public String getConfigComponentName() {
return StorPoolAbandonObjectsCollector.class.getSimpleName();
}
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] { volumeCheckupTagsInterval, snapshotCheckupTagsInterval };
}
@Override
public boolean start() {
init();
return true;
}
private void init() {
_volumeTagsUpdateExecutor = Executors.newScheduledThreadPool(2,
new NamedThreadFactory("StorPoolAbandonObjectsCollector"));
StorPoolHelper.appendLogger(log, ABANDON_LOG, "abandon");
if (volumeCheckupTagsInterval.value() > 0) {
_volumeTagsUpdateExecutor.scheduleAtFixedRate(new StorPoolVolumesTagsUpdate(),
volumeCheckupTagsInterval.value(), volumeCheckupTagsInterval.value(), TimeUnit.SECONDS);
}
if (snapshotCheckupTagsInterval.value() > 0) {
_volumeTagsUpdateExecutor.scheduleAtFixedRate(new StorPoolSnapshotsTagsUpdate(),
snapshotCheckupTagsInterval.value(), snapshotCheckupTagsInterval.value(), TimeUnit.SECONDS);
}
}
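// Strategy of both collectors below: dump the names of all StorPool objects that
// carry a "cs" tag into a temporary table, LEFT JOIN it against the corresponding
// CloudStack tables, and log every StorPool object CloudStack does not know about.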
class StorPoolVolumesTagsUpdate extends ManagedContextRunnable {
@Override
@DB
protected void runInContext() {
List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
if (CollectionUtils.isEmpty(spPools)) {
return;
}
Map<String, String> volumes = new HashMap<>();
for (StoragePoolVO storagePoolVO : spPools) {
try {
JsonArray arr = StorPoolUtil.volumesList(StorPoolUtil.getSpConnection(storagePoolVO.getUuid(), storagePoolVO.getId(), storagePoolDetailsDao, storagePoolDao));
volumes.putAll(getStorPoolNamesAndCsTag(arr));
} catch (Exception e) {
log.debug(String.format("Could not collect abandon objects due to %s", e.getMessage()), e);
}
}
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(TransactionStatus status) {
TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
PreparedStatement pstmt = txn.prepareAutoCloseStatement(
"CREATE TEMPORARY TABLE `cloud`.`volumes1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
pstmt.executeUpdate();
pstmt = txn.prepareAutoCloseStatement(
"CREATE TEMPORARY TABLE `cloud`.`volumes_on_host1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
pstmt.executeUpdate();
} catch (SQLException e) {
log.info(String.format("[ignored] SQL failed to create temporary tables: %s",
e.getLocalizedMessage()));
} catch (Throwable e) {
log.info(String.format("[ignored] caught an error while creating temporary tables: %s",
e.getLocalizedMessage()));
}
try {
PreparedStatement pstmt = txn.prepareStatement("INSERT INTO `cloud`.`volumes1` (name, tag) VALUES (?, ?)");
PreparedStatement volumesOnHostpstmt = txn.prepareStatement("INSERT INTO `cloud`.`volumes_on_host1` (name, tag) VALUES (?, ?)");
for (Map.Entry<String, String> volume : volumes.entrySet()) {
if (volume.getValue().equals("volume")) {
addRecordToDb(volume.getKey(), pstmt, volume.getValue(), true);
} else if (volume.getValue().equals("check-volume-is-on-host")) {
addRecordToDb(volume.getKey(), volumesOnHostpstmt, volume.getValue(), true);
}
}
pstmt.executeBatch();
volumesOnHostpstmt.executeBatch();
String sql = "SELECT f.* FROM `cloud`.`volumes1` f LEFT JOIN `cloud`.`volumes` v ON f.name=v.path where v.path is NULL OR NOT state=?";
findMissingRecordsInCS(txn, sql, "volume");
String sqlVolumeOnHost = "SELECT f.* FROM `cloud`.`volumes_on_host1` f LEFT JOIN `cloud`.`storage_pool_details` v ON f.name=v.value where v.value is NULL";
findMissingRecordsInCS(txn, sqlVolumeOnHost, "volumes_on_host");
} catch (SQLException e) {
log.info(String.format("[ignored] SQL failed due to: %s ",
e.getLocalizedMessage()));
} catch (Throwable e) {
log.info(String.format("[ignored] caught an error: %s",
e.getLocalizedMessage()));
} finally {
try {
PreparedStatement pstmt = txn.prepareStatement("DROP TABLE `cloud`.`volumes1`");
pstmt.executeUpdate();
pstmt = txn.prepareStatement("DROP TABLE `cloud`.`volumes_on_host1`");
pstmt.executeUpdate();
} catch (SQLException e) {
txn.close();
log.info(String.format("createTemporaryVolumeTable %s", e.getMessage()));
}
txn.close();
}
}
});
}
}
class StorPoolSnapshotsTagsUpdate extends ManagedContextRunnable {
@Override
@DB
protected void runInContext() {
List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
Map<String, String> snapshots = new HashMap<String, String>();
if (CollectionUtils.isEmpty(spPools)) {
return;
}
for (StoragePoolVO storagePoolVO : spPools) {
try {
JsonArray arr = StorPoolUtil.snapshotsList(StorPoolUtil.getSpConnection(storagePoolVO.getUuid(), storagePoolVO.getId(), storagePoolDetailsDao, storagePoolDao));
snapshots.putAll(getStorPoolNamesAndCsTag(arr));
} catch (Exception e) {
log.debug(String.format("Could not collect abandon objects due to %s", e.getMessage()));
}
}
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(TransactionStatus status) {
TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
PreparedStatement pstmt = txn.prepareAutoCloseStatement(
"CREATE TEMPORARY TABLE `cloud`.`snapshots1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
pstmt.executeUpdate();
pstmt = txn.prepareAutoCloseStatement(
"CREATE TEMPORARY TABLE `cloud`.`vm_snapshots1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
pstmt.executeUpdate();
pstmt = txn.prepareAutoCloseStatement(
"CREATE TEMPORARY TABLE `cloud`.`vm_templates1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
pstmt.executeUpdate();
} catch (SQLException e) {
log.info(String.format("[ignored] SQL failed to create temporary tables: %s",
e.getLocalizedMessage()));
} catch (Throwable e) {
log.info(String.format("[ignored] caught an error while creating temporary tables: %s",
e.getLocalizedMessage()));
}
try {
PreparedStatement snapshotsPstmt = txn.prepareStatement("INSERT INTO `cloud`.`snapshots1` (name, tag) VALUES (?, ?)");
PreparedStatement groupSnapshotsPstmt = txn.prepareStatement("INSERT INTO `cloud`.`vm_snapshots1` (name, tag) VALUES (?, ?)");
PreparedStatement templatePstmt = txn.prepareStatement("INSERT INTO `cloud`.`vm_templates1` (name, tag) VALUES (?, ?)");
for (Map.Entry<String, String> snapshot : snapshots.entrySet()) {
if (!snapshot.getValue().equals("group") && !snapshot.getValue().equals("template")) {
addRecordToDb(snapshot.getKey(), snapshotsPstmt, snapshot.getValue(), true);
} else if (snapshot.getValue().equals("group")) {
addRecordToDb(snapshot.getKey(), groupSnapshotsPstmt, snapshot.getValue(), true);
} else if (snapshot.getValue().equals("template")) {
addRecordToDb(snapshot.getKey(), templatePstmt, snapshot.getValue(), true);
}
}
snapshotsPstmt.executeBatch();
groupSnapshotsPstmt.executeBatch();
templatePstmt.executeBatch();
String sqlSnapshots = "SELECT f.* FROM `cloud`.`snapshots1` f LEFT JOIN `cloud`.`snapshot_details` v ON f.name=v.value where v.value is NULL";
findMissingRecordsInCS(txn, sqlSnapshots, "snapshot");
String sqlVmSnapshots = "SELECT f.* FROM `cloud`.`vm_snapshots1` f LEFT JOIN `cloud`.`vm_snapshot_details` v ON f.name=v.value where v.value is NULL";
findMissingRecordsInCS(txn, sqlVmSnapshots, "snapshot");
String sqlTemplates = "SELECT temp.*"
+ " FROM `cloud`.`vm_templates1` temp"
+ " LEFT JOIN `cloud`.`template_store_ref` store"
+ " ON temp.name=store.local_path"
+ " LEFT JOIN `cloud`.`template_spool_ref` spool"
+ " ON temp.name=spool.local_path"
+ " where store.local_path is NULL"
+ " and spool.local_path is NULL";
findMissingRecordsInCS(txn, sqlTemplates, "snapshot");
} catch (SQLException e) {
log.info(String.format("[ignored] SQL failed due to: %s ",
e.getLocalizedMessage()));
} catch (Throwable e) {
log.info(String.format("[ignored] caught an error: %s",
e.getLocalizedMessage()));
} finally {
try {
PreparedStatement pstmt = txn.prepareStatement("DROP TABLE `cloud`.`snapshots1`");
pstmt.executeUpdate();
pstmt = txn.prepareStatement("DROP TABLE `cloud`.`vm_snapshots1`");
pstmt.executeUpdate();
pstmt = txn.prepareStatement("DROP TABLE `cloud`.`vm_templates1`");
pstmt.executeUpdate();
} catch (SQLException e) {
txn.close();
log.info(String.format("createTemporaryVolumeTable %s", e.getMessage()));
}
txn.close();
}
}
});
}
}
private void addRecordToDb(String name, PreparedStatement pstmt, String tag, boolean pathNeeded)
throws SQLException {
name = name.startsWith("~") ? name.split("~")[1] : name;
pstmt.setString(1, pathNeeded ? StorPoolUtil.devPath(name) : name);
pstmt.setString(2, tag);
pstmt.addBatch();
}
private void findMissingRecordsInCS(TransactionLegacy txn, String sql, String object) throws SQLException {
ResultSet rs;
PreparedStatement pstmt2 = txn.prepareStatement(sql);
if (object.equals("volume")) {
pstmt2.setString(1, "Ready");
}
rs = pstmt2.executeQuery();
while (rs.next()) {
String name = rs.getString(2);
log.info(String.format(
"CloudStack does not know about StorPool %s %s; it was tagged as \"%s\"", object, name, rs.getString(3)));
}
}
private Map<String,String> getStorPoolNamesAndCsTag(JsonArray arr) {
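// Each array element is expected to carry a name, an optional "deleted" flag, and
// a "tags" object holding the "cs" tag, e.g. (illustrative):
//   {"name":"<sp-name>","deleted":false,"tags":{"cs":"volume"}}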
Map<String, String> map = new HashMap<>();
for (int i = 0; i < arr.size(); i++) {
String name = arr.get(i).getAsJsonObject().get("name").getAsString();
String tag = null;
if (!name.startsWith("*") && !name.contains("@")) {
JsonObject tags = arr.get(i).getAsJsonObject().get("tags").getAsJsonObject();
if (tags != null && tags.getAsJsonPrimitive("cs") != null && !(arr.get(i).getAsJsonObject().get("deleted") != null && arr.get(i).getAsJsonObject().get("deleted").getAsBoolean())) {
tag = tags.getAsJsonPrimitive("cs").getAsString();
map.put(name, tag);
}
}
}
return map;
}
}

View File

@ -0,0 +1,976 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.driver;
import java.util.Map;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.log4j.Logger;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.ResizeVolumeAnswer;
import com.cloud.agent.api.storage.StorPoolBackupSnapshotCommand;
import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand;
import com.cloud.agent.api.storage.StorPoolCopyVolumeToSecondaryCommand;
import com.cloud.agent.api.storage.StorPoolDownloadTemplateCommand;
import com.cloud.agent.api.storage.StorPoolDownloadVolumeCommand;
import com.cloud.agent.api.storage.StorPoolResizeVolumeCommand;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.host.Host;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.server.ResourceTag;
import com.cloud.server.ResourceTag.ResourceObjectType;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateDetailVO;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplateDetailsDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.VMInstanceDao;
public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
private static final Logger log = Logger.getLogger(StorPoolPrimaryDataStoreDriver.class);
@Inject
private VolumeDao volumeDao;
@Inject
private StorageManager storageMgr;
@Inject
private PrimaryDataStoreDao primaryStoreDao;
@Inject
private EndPointSelector selector;
@Inject
private ConfigurationDao configDao;
@Inject
private TemplateDataStoreDao vmTemplateDataStoreDao;
@Inject
private VMInstanceDao vmInstanceDao;
@Inject
private ClusterDao clusterDao;
@Inject
private HostDao hostDao;
@Inject
private ResourceTagDao _resourceTagDao;
@Inject
private SnapshotDetailsDao _snapshotDetailsDao;
@Inject
private SnapshotDataStoreDao snapshotDataStoreDao;
@Inject
private VolumeDetailsDao volumeDetailsDao;
@Inject
private VMTemplateDetailsDao vmTemplateDetailsDao;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject
private VMTemplatePoolDao vmTemplatePoolDao;
@Override
public Map<String, String> getCapabilities() {
return null;
}
@Override
public DataTO getTO(DataObject data) {
return null;
}
@Override
public DataStoreTO getStoreTO(DataStore store) {
return null;
}
@Override
public long getUsedBytes(StoragePool storagePool) {
return 0;
}
@Override
public long getUsedIops(StoragePool storagePool) {
return 0;
}
@Override
public boolean grantAccess(DataObject data, Host host, DataStore dataStore) {
return false;
}
@Override
public void revokeAccess(DataObject data, Host host, DataStore dataStore) {
}
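// Keeps the pool's used-bytes accounting in sync after volume create/delete/resize,
// clamping the result between 0 and the pool's capacity.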
private void updateStoragePool(final long poolId, final long deltaUsedBytes) {
StoragePoolVO storagePool = primaryStoreDao.findById(poolId);
final long capacity = storagePool.getCapacityBytes();
final long used = storagePool.getUsedBytes() + deltaUsedBytes;
storagePool.setUsedBytes(used < 0 ? 0 : (used > capacity ? capacity : used));
primaryStoreDao.update(poolId, storagePool);
}
private String getVMInstanceUUID(Long id) {
return id != null ? vmInstanceDao.findById(id).getUuid() : null;
}
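// Helpers that wrap a CreateObjectAnswer in a CreateCmdResult and complete the async callback.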
protected void _completeResponse(final CreateObjectAnswer answer, final String err, final AsyncCompletionCallback<CommandResult> callback)
{
final CreateCmdResult res = new CreateCmdResult(null, answer);
res.setResult(err);
callback.complete(res);
}
protected void completeResponse(final DataTO result, final AsyncCompletionCallback<CommandResult> callback)
{
_completeResponse(new CreateObjectAnswer(result), null, callback);
}
protected void completeResponse(final String err, final AsyncCompletionCallback<CommandResult> callback)
{
_completeResponse(new CreateObjectAnswer(err), err, callback);
}
@Override
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
return dataObject.getSize();
}
@Override
public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
return 0;
}
@Override
public ChapInfo getChapInfo(DataObject dataObject) {
return null;
}
@Override
public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
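// Creates a StorPool volume backing the CloudStack VOLUME object and stores the
// resulting /dev/storpool path and pool id back into the volume record.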
String path = null;
String err = null;
if (data.getType() == DataObjectType.VOLUME) {
try {
VolumeInfo vinfo = (VolumeInfo)data;
String name = vinfo.getUuid();
Long size = vinfo.getSize();
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", vinfo.getName(), vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), conn.getTemplateName());
SpApiResponse resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vinfo.getInstanceId()), null, "volume", vinfo.getMaxIops(), conn);
if (resp.getError() == null) {
String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
path = StorPoolUtil.devPath(volumeName);
VolumeVO volume = volumeDao.findById(vinfo.getId());
volume.setPoolId(dataStore.getId());
volume.setPoolType(StoragePoolType.SharedMountPoint);
volume.setPath(path);
volumeDao.update(volume.getId(), volume);
updateStoragePool(dataStore.getId(), size);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", volumeName, vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), conn.getTemplateName());
} else {
err = String.format("Could not create StorPool volume %s. Error: %s", name, resp.getError());
}
} catch (Exception e) {
err = String.format("Could not create volume due to %s", e.getMessage());
}
} else {
err = String.format("Invalid object type \"%s\" passed to createAsync", data.getType());
}
CreateCmdResult res = new CreateCmdResult(path, new Answer(null, err == null, err));
res.setResult(err);
if (callback != null) {
callback.complete(res);
}
}
@Override
public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
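// Resizes the volume through the StorPool API first, then sends a resize command to the
// agent; if anything fails, the volume is restored to its original size and IOPS limit.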
String path = null;
String err = null;
ResizeVolumeAnswer answer = null;
if (data.getType() == DataObjectType.VOLUME) {
VolumeObject vol = (VolumeObject)data;
StoragePool pool = (StoragePool)data.getDataStore();
ResizeVolumePayload payload = (ResizeVolumePayload)vol.getpayload();
final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true);
final long oldSize = vol.getSize();
Long oldMaxIops = vol.getMaxIops();
try {
SpConnectionDesc conn = StorPoolUtil.getSpConnection(data.getDataStore().getUuid(), data.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.resize: name=%s, uuid=%s, oldSize=%d, newSize=%s, shrinkOk=%s", name, vol.getUuid(), oldSize, payload.newSize, payload.shrinkOk);
SpApiResponse resp = StorPoolUtil.volumeUpdate(name, payload.newSize, payload.shrinkOk, payload.newMaxIops, conn);
if (resp.getError() != null) {
err = String.format("Could not resize StorPool volume %s. Error: %s", name, resp.getError());
} else {
StorPoolResizeVolumeCommand resizeCmd = new StorPoolResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), payload.newSize, payload.shrinkOk,
payload.instanceName, payload.hosts != null);
answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, payload.hosts, resizeCmd);
if (answer == null || !answer.getResult()) {
err = answer != null ? answer.getDetails() : "Received a null answer; resize failed for an unknown reason";
} else {
path = StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false));
vol.setSize(payload.newSize);
vol.update();
if (payload.newMaxIops != null) {
VolumeVO volume = volumeDao.findById(vol.getId());
volume.setMaxIops(payload.newMaxIops);
volumeDao.update(volume.getId(), volume);
}
updateStoragePool(vol.getPoolId(), payload.newSize - oldSize);
}
}
if (err != null) {
// try restoring volume to its initial size
resp = StorPoolUtil.volumeUpdate(name, oldSize, true, oldMaxIops, conn);
if (resp.getError() != null) {
log.debug(String.format("Could not resize StorPool volume %s back to its original size. Error: %s", name, resp.getError()));
}
}
} catch (Exception e) {
log.debug("sending resize command failed", e);
err = e.toString();
}
} else {
err = String.format("Invalid object type \"%s\" passed to resize", data.getType());
}
CreateCmdResult res = new CreateCmdResult(path, answer);
res.setResult(err);
callback.complete(res);
}
@Override
public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
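// Deletes the StorPool volume backing the CloudStack volume; a missing volume
// ("objectDoesNotExist") is treated as already deleted rather than as an error.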
String err = null;
if (data.getType() == DataObjectType.VOLUME) {
VolumeInfo vinfo = (VolumeInfo)data;
String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.deleteAsync delete volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s dataStore=%s", name, vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), dataStore.getUuid());
if (name == null) {
name = vinfo.getUuid();
}
try {
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao);
SpApiResponse resp = StorPoolUtil.volumeDelete(name, conn);
if (resp.getError() == null) {
updateStoragePool(dataStore.getId(), - vinfo.getSize());
VolumeDetailVO detail = volumeDetailsDao.findDetail(vinfo.getId(), StorPoolUtil.SP_PROVIDER_NAME);
if (detail != null) {
volumeDetailsDao.remove(detail.getId());
}
} else {
if (!resp.getError().getName().equalsIgnoreCase("objectDoesNotExist")) {
err = String.format("Could not delete StorPool volume %s. Error: %s", name, resp.getError());
}
}
} catch (Exception e) {
err = String.format("Could not delete volume due to %s", e.getMessage());
}
} else {
err = String.format("Invalid DataObjectType \"%s\" passed to deleteAsync", data.getType());
}
if (err != null) {
log.error(err);
StorPoolUtil.spLog(err);
}
CommandResult res = new CommandResult();
res.setResult(err);
callback.complete(res);
}
private void logDataObject(final String pref, DataObject data) {
final DataStore dstore = data.getDataStore();
String name = null;
Long size = null;
if (data.getType() == DataObjectType.VOLUME) {
VolumeInfo vinfo = (VolumeInfo)data;
name = vinfo.getName();
size = vinfo.getSize();
} else if (data.getType() == DataObjectType.SNAPSHOT) {
SnapshotInfo sinfo = (SnapshotInfo)data;
name = sinfo.getName();
size = sinfo.getSize();
} else if (data.getType() == DataObjectType.TEMPLATE) {
TemplateInfo tinfo = (TemplateInfo)data;
name = tinfo.getName();
size = tinfo.getSize();
}
StorPoolUtil.spLog("%s: name=%s, size=%s, uuid=%s, type=%s, dstore=%s:%s:%s", pref, name, size, data.getUuid(), data.getType(), dstore.getUuid(), dstore.getName(), dstore.getRole());
}
@Override
public boolean canCopy(DataObject srcData, DataObject dstData) {
return true;
}
@Override
public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCallback<CopyCommandResult> callback) {
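// Dispatches on the (source, destination) data object types. Supported combinations:
// SNAPSHOT->VOLUME, SNAPSHOT->SNAPSHOT (backup to secondary), VOLUME->TEMPLATE,
// TEMPLATE->TEMPLATE (download to primary), TEMPLATE->VOLUME and VOLUME->VOLUME.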
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc:");
logDataObject("SRC", srcData);
logDataObject("DST", dstData);
final DataObjectType srcType = srcData.getType();
final DataObjectType dstType = dstData.getType();
String err = null;
Answer answer = null;
StorageSubSystemCommand cmd = null;
try {
if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.VOLUME) {
SnapshotInfo sinfo = (SnapshotInfo)srcData;
final String snapshotName = StorPoolHelper.getSnapshotName(srcData.getId(), srcData.getUuid(), snapshotDataStoreDao, _snapshotDetailsDao);
VolumeInfo vinfo = (VolumeInfo)dstData;
final String volumeName = vinfo.getUuid();
final Long size = vinfo.getSize();
SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
SpApiResponse resp = StorPoolUtil.volumeCreate(volumeName, snapshotName, size, null, null, "volume", sinfo.getBaseVolume().getMaxIops(), conn);
if (resp.getError() == null) {
updateStoragePool(dstData.getDataStore().getId(), size);
VolumeObjectTO to = (VolumeObjectTO)dstData.getTO();
to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
to.setSize(size);
answer = new CopyCmdAnswer(to);
StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", StorPoolUtil.getNameFromResponse(resp, false), to.getUuid(), snapshotName, sinfo.getUuid());
} else if (resp.getError().getName().equals("objectDoesNotExist")) {
//check if snapshot is on secondary storage
StorPoolUtil.spLog("Snapshot %s does not exists on StorPool, will try to create a volume from a snopshot on secondary storage", snapshotName);
SnapshotDataStoreVO snap = snapshotDataStoreDao.findBySnapshot(sinfo.getId(), DataStoreRole.Image);
if (snap != null && StorPoolStorageAdaptor.getVolumeNameFromPath(snap.getInstallPath(), false) == null) {
resp = StorPoolUtil.volumeCreate(srcData.getUuid(), null, size, null, "no", "snapshot", sinfo.getBaseVolume().getMaxIops(), conn);
if (resp.getError() == null) {
VolumeObjectTO dstTO = (VolumeObjectTO) dstData.getTO();
dstTO.setSize(size);
dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
cmd = new StorPoolDownloadTemplateCommand(srcData.getTO(), dstTO, StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value(), "volume");
EndPoint ep = selector.select(srcData, dstData);
if (ep == null) {
err = "No remote endpoint to send command, check if host or ssvm is down?";
} else {
answer = ep.sendMessage(cmd);
}
if (answer != null && answer.getResult()) {
SpApiResponse resp2 = StorPoolUtil.volumeFreeze(StorPoolUtil.getNameFromResponse(resp, true), conn);
if (resp2.getError() != null) {
err = String.format("Could not freeze Storpool volume %s. Error: %s", srcData.getUuid(), resp2.getError());
} else {
String name = StorPoolUtil.getNameFromResponse(resp, false);
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(sinfo.getId(), sinfo.getUuid());
if (snapshotDetails != null) {
StorPoolHelper.updateSnapshotDetailsValue(snapshotDetails.getId(), StorPoolUtil.devPath(name), "snapshot");
} else {
StorPoolHelper.addSnapshotDetails(sinfo.getId(), sinfo.getUuid(), StorPoolUtil.devPath(name), _snapshotDetailsDao);
}
resp = StorPoolUtil.volumeCreate(volumeName, StorPoolUtil.getNameFromResponse(resp, true), size, null, null, "volume", sinfo.getBaseVolume().getMaxIops(), conn);
if (resp.getError() == null) {
updateStoragePool(dstData.getDataStore().getId(), size);
VolumeObjectTO to = (VolumeObjectTO) dstData.getTO();
to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
to.setSize(size);
// successfully downloaded snapshot to primary storage
answer = new CopyCmdAnswer(to);
StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", name, to.getUuid(), snapshotName, sinfo.getUuid());
} else {
err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
}
}
} else {
err = answer != null ? answer.getDetails() : "Unknown error while downloading template. Null answer returned.";
}
} else {
err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
}
} else {
err = String.format("The snapshot %s does not exists neither on primary, neither on secondary storage. Cannot create volume from snapshot", snapshotName);
}
} else {
err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
}
} else if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.SNAPSHOT) {
// bypass secondary storage
if (StorPoolConfigurationManager.BypassSecondaryStorage.value()) {
SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData.getTO();
answer = new CopyCmdAnswer(snapshot);
} else {
// copy snapshot to secondary storage (backup snapshot)
cmd = new StorPoolBackupSnapshotCommand(srcData.getTO(), dstData.getTO(), StorPoolHelper.getTimeout(StorPoolHelper.BackupSnapshotWait, configDao), VirtualMachineManager.ExecuteInSequence.value());
final String snapName = StorPoolStorageAdaptor.getVolumeNameFromPath(((SnapshotInfo) srcData).getPath(), true);
SpConnectionDesc conn = StorPoolUtil.getSpConnection(srcData.getDataStore().getUuid(), srcData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
try {
Long clusterId = StorPoolHelper.findClusterIdByGlobalId(snapName, clusterDao);
EndPoint ep = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData);
if (ep == null) {
err = "No remote endpoint to send command, check if host or ssvm is down?";
} else {
answer = ep.sendMessage(cmd);
// if error during snapshot backup, cleanup the StorPool snapshot
if (answer != null && !answer.getResult()) {
StorPoolUtil.spLog(String.format("Error while backing-up snapshot '%s' - cleaning up StorPool snapshot. Error: %s", snapName, answer.getDetails()));
SpApiResponse resp = StorPoolUtil.snapshotDelete(snapName, conn);
if (resp.getError() != null) {
final String err2 = String.format("Failed to cleanup StorPool snapshot '%s'. Error: %s.", snapName, resp.getError());
log.error(err2);
StorPoolUtil.spLog(err2);
}
}
}
} catch (CloudRuntimeException e) {
err = e.getMessage();
}
}
} else if (srcType == DataObjectType.VOLUME && dstType == DataObjectType.TEMPLATE) {
// create template from volume
VolumeObjectTO volume = (VolumeObjectTO) srcData.getTO();
TemplateObjectTO template = (TemplateObjectTO) dstData.getTO();
SpConnectionDesc conn = StorPoolUtil.getSpConnection(srcData.getDataStore().getUuid(), srcData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
cmd = new StorPoolBackupTemplateFromSnapshotCommand(volume, template,
StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value());
try {
Long clusterId = StorPoolHelper.findClusterIdByGlobalId(volumeName, clusterDao);
EndPoint ep2 = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData);
if (ep2 == null) {
err = "No remote endpoint to send command, check if host or ssvm is down?";
} else {
answer = ep2.sendMessage(cmd);
if (answer != null && answer.getResult()) {
SpApiResponse resSnapshot = StorPoolUtil.volumeSnapshot(volumeName, template.getUuid(), null, "template", "no", conn);
if (resSnapshot.getError() != null) {
log.debug(String.format("Could not snapshot volume with ID=%s", volume.getId()));
StorPoolUtil.spLog("Volume snapshot failed with error=%s", resSnapshot.getError().getDescr());
err = resSnapshot.getError().getDescr();
}
else {
StorPoolHelper.updateVmStoreTemplate(template.getId(), template.getDataStore().getRole(), StorPoolUtil.devPath(StorPoolUtil.getSnapshotNameFromResponse(resSnapshot, false, StorPoolUtil.GLOBAL_ID)), vmTemplateDataStoreDao);
vmTemplateDetailsDao.persist(new VMTemplateDetailVO(template.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(srcData.getDataStore().getId()), false));
}
} else {
err = "Could not copy the template to secondary storage: " + (answer != null ? answer.getDetails() : "null answer returned");
}
}
} catch (CloudRuntimeException e) {
err = e.getMessage();
}
} else if (srcType == DataObjectType.TEMPLATE && dstType == DataObjectType.TEMPLATE) {
// copy template to primary storage
TemplateInfo tinfo = (TemplateInfo)dstData;
Long size = tinfo.getSize();
if (size == null || size == 0) {
size = 1L * 1024 * 1024 * 1024;
}
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dstData.getDataStore().getUuid(), dstData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
TemplateDataStoreVO templDataStoreVO = vmTemplateDataStoreDao.findByTemplate(tinfo.getId(), DataStoreRole.Image);
String snapshotName = (templDataStoreVO != null && templDataStoreVO.getLocalDownloadPath() != null)
? StorPoolStorageAdaptor.getVolumeNameFromPath(templDataStoreVO.getLocalDownloadPath(), true)
: null;
String name = tinfo.getUuid();
SpApiResponse resp = null;
if (snapshotName != null) {
// No need to copy the volume from secondary storage; we already have it on primary.
// Just create a child snapshot from it. The child snapshot is needed when
// "storage.cleanup.enabled" is true, so that cleanup does not remove the base snapshot and lose everything.
resp = StorPoolUtil.volumeCreate(name, snapshotName, size, null, "no", "template", null, conn);
if (resp.getError() != null) {
err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
} else {
String volumeNameToSnapshot = StorPoolUtil.getNameFromResponse(resp, true);
SpApiResponse resp2 = StorPoolUtil.volumeFreeze(volumeNameToSnapshot, conn);
if (resp2.getError() != null) {
err = String.format("Could not freeze Storpool volume %s. Error: %s", name, resp2.getError());
} else {
StorPoolUtil.spLog("Storpool snapshot [%s] for a template exists. Creating template on Storpool with name [%s]", tinfo.getUuid(), name);
TemplateObjectTO dstTO = (TemplateObjectTO) dstData.getTO();
dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
dstTO.setSize(size);
answer = new CopyCmdAnswer(dstTO);
}
}
} else {
resp = StorPoolUtil.volumeCreate(name, null, size, null, "no", "template", null, conn);
if (resp.getError() != null) {
err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
} else {
TemplateObjectTO dstTO = (TemplateObjectTO)dstData.getTO();
dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
dstTO.setSize(size);
cmd = new StorPoolDownloadTemplateCommand(srcData.getTO(), dstTO, StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value(), "volume");
EndPoint ep = selector.select(srcData, dstData);
if (ep == null) {
err = "No remote endpoint to send command, check if host or ssvm is down?";
} else {
answer = ep.sendMessage(cmd);
}
if (answer != null && answer.getResult()) {
// successfully downloaded template to primary storage
SpApiResponse resp2 = StorPoolUtil.volumeFreeze(StorPoolUtil.getNameFromResponse(resp, true), conn);
if (resp2.getError() != null) {
err = String.format("Could not freeze Storpool volume %s. Error: %s", name, resp2.getError());
}
} else {
err = answer != null ? answer.getDetails() : "Unknown error while downloading template. Null answer returned.";
}
}
}
if (err != null) {
resp = StorPoolUtil.volumeDelete(StorPoolUtil.getNameFromResponse(resp, true), conn);
if (resp.getError() != null) {
log.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp.getError()));
}
}
} else if (srcType == DataObjectType.TEMPLATE && dstType == DataObjectType.VOLUME) {
// create volume from template on StorPool primary storage
TemplateInfo tinfo = (TemplateInfo)srcData;
VolumeInfo vinfo = (VolumeInfo)dstData;
VMTemplateStoragePoolVO templStoragePoolVO = StorPoolHelper.findByPoolTemplate(vinfo.getPoolId(), tinfo.getId());
final String parentName = templStoragePoolVO.getLocalDownloadPath() != null ? StorPoolStorageAdaptor.getVolumeNameFromPath(templStoragePoolVO.getLocalDownloadPath(), true) : StorPoolStorageAdaptor.getVolumeNameFromPath(templStoragePoolVO.getInstallPath(), true);
final String name = vinfo.getUuid();
SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
Long snapshotSize = StorPoolUtil.snapshotSize(parentName, conn);
if (snapshotSize == null) {
err = String.format("Snapshot=%s does not exist on StorPool. Will recreate it first on primary", parentName);
vmTemplatePoolDao.remove(templStoragePoolVO.getId());
}
if (err == null) {
long size = vinfo.getSize();
if (size < snapshotSize) {
StorPoolUtil.spLog(String.format("The provided size %d is smaller than the snapshot size %d; using the snapshot size", size, snapshotSize));
size = snapshotSize;
}
StorPoolUtil.spLog(String.format("volume size is: %d", size));
Long vmId = vinfo.getInstanceId();
SpApiResponse resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId),
getVcPolicyTag(vmId), "volume", vinfo.getMaxIops(), conn);
if (resp.getError() == null) {
updateStoragePool(dstData.getDataStore().getId(), vinfo.getSize());
VolumeObjectTO to = (VolumeObjectTO) vinfo.getTO();
to.setSize(vinfo.getSize());
to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
answer = new CopyCmdAnswer(to);
} else {
err = String.format("Could not create Storpool volume %s. Error: %s", name, resp.getError());
}
}
} else if (srcType == DataObjectType.VOLUME && dstType == DataObjectType.VOLUME) {
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.copyAsync src Data Store=%s", srcData.getDataStore().getDriver());
VolumeInfo dstInfo = (VolumeInfo)dstData;
VolumeInfo srcInfo = (VolumeInfo) srcData;
if (!(srcData.getDataStore().getDriver() instanceof StorPoolPrimaryDataStoreDriver)) {
// copy "VOLUME" to primary storage
String name = dstInfo.getUuid();
Long size = dstInfo.getSize();
if (size == null || size == 0) {
size = 1L * 1024 * 1024 * 1024;
}
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dstData.getDataStore().getUuid(), dstData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
Long vmId = srcInfo.getInstanceId();
SpApiResponse resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vmId), getVcPolicyTag(vmId), "volume", dstInfo.getMaxIops(), conn);
if (resp.getError() != null) {
err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
} else {
//updateVolume(dstData.getId());
VolumeObjectTO dstTO = (VolumeObjectTO)dstData.getTO();
dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
dstTO.setSize(size);
cmd = new StorPoolDownloadVolumeCommand(srcData.getTO(), dstTO, StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value());
EndPoint ep = selector.select(srcData, dstData);
if (ep == null) {
StorPoolUtil.spLog("select(srcData, dstData) returned null; trying srcData only");
ep = selector.select(srcData); // StorPool storage is zone-wide
}
if (ep == null) {
err = "No remote endpoint to send command, check if host or ssvm is down?";
} else {
StorPoolUtil.spLog("Sending command to %s", ep.getHostAddr());
answer = ep.sendMessage(cmd);
if (answer != null && answer.getResult()) {
// successfully downloaded volume to primary storage
} else {
err = answer != null ? answer.getDetails() : "Unknown error while downloading volume. Null answer returned.";
}
}
if (err != null) {
SpApiResponse resp3 = StorPoolUtil.volumeDelete(name, conn);
if (resp3.getError() != null) {
log.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp3.getError()));
}
}
}
} else {
// download volume - first copies to secondary
VolumeObjectTO srcTO = (VolumeObjectTO)srcData.getTO();
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc SRC path=%s ", srcTO.getPath());
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc DST canonicalName=%s ", dstData.getDataStore().getClass().getCanonicalName());
PrimaryDataStoreTO checkStoragePool = dstData.getTO().getDataStore() instanceof PrimaryDataStoreTO ? (PrimaryDataStoreTO)dstData.getTO().getDataStore() : null;
final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(srcTO.getPath(), true);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc DST tmpSnapName=%s ,srcUUID=%s", name, srcTO.getUuid());
if (checkStoragePool != null && checkStoragePool.getPoolType().equals(StoragePoolType.SharedMountPoint)) {
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dstData.getDataStore().getUuid(), dstData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
String baseOn = StorPoolStorageAdaptor.getVolumeNameFromPath(srcTO.getPath(), true);
//uuid tag will be the same as srcData.uuid
String volumeName = srcData.getUuid();
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc volumeName=%s, baseOn=%s", volumeName, baseOn);
final SpApiResponse response = StorPoolUtil.volumeCopy(volumeName, baseOn, "volume", srcInfo.getMaxIops(), conn);
srcTO.setSize(srcData.getSize());
srcTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(response, false)));
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc DST to=%s", srcTO);
answer = new CopyCmdAnswer(srcTO);
} else {
SpConnectionDesc conn = StorPoolUtil.getSpConnection(srcData.getDataStore().getUuid(), srcData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
final SpApiResponse resp = StorPoolUtil.volumeSnapshot(name, srcTO.getUuid(), srcInfo.getInstanceId() != null ? getVMInstanceUUID(srcInfo.getInstanceId()) : null, "temporary", null, conn);
String snapshotName = StorPoolUtil.getSnapshotNameFromResponse(resp, true, StorPoolUtil.GLOBAL_ID);
if (resp.getError() == null) {
srcTO.setPath(StorPoolUtil.devPath(
StorPoolUtil.getSnapshotNameFromResponse(resp, false, StorPoolUtil.GLOBAL_ID)));
cmd = new StorPoolCopyVolumeToSecondaryCommand(srcTO, dstData.getTO(), StorPoolHelper.getTimeout(StorPoolHelper.CopyVolumeWait, configDao), VirtualMachineManager.ExecuteInSequence.value());
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc command=%s ", cmd);
try {
Long clusterId = StorPoolHelper.findClusterIdByGlobalId(snapshotName, clusterDao);
EndPoint ep = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData);
StorPoolUtil.spLog("selector.select(srcData, dstData) ", ep);
if (ep == null) {
ep = selector.select(dstData);
StorPoolUtil.spLog("selector.select(srcData) ", ep);
}
if (ep == null) {
err = "No remote endpoint to send command, check if host or ssvm is down?";
} else {
answer = ep.sendMessage(cmd);
StorPoolUtil.spLog("Answer: details=%s, result=%s", answer.getDetails(), answer.getResult());
}
} catch (CloudRuntimeException e) {
err = e.getMessage();
}
} else {
err = String.format("Failed to create temporary StorPool snapshot while trying to download volume %s (uuid %s). Error: %s", srcTO.getName(), srcTO.getUuid(), resp.getError());
}
final SpApiResponse resp2 = StorPoolUtil.snapshotDelete(snapshotName, conn);
if (resp2.getError() != null) {
final String err2 = String.format("Failed to delete temporary StorPool snapshot %s. Error: %s", StorPoolUtil.getNameFromResponse(resp, true), resp2.getError());
log.error(err2);
StorPoolUtil.spLog(err2);
}
}
}
} else {
err = String.format("Unsupported copy operation from %s (type %s) to %s (type %s)", srcData.getUuid(), srcType, dstData.getUuid(), dstType);
}
} catch (Exception e) {
StorPoolUtil.spLog("Caught exception: %s", e.toString());
err = e.toString();
}
if (answer != null && !answer.getResult()) {
err = answer.getDetails();
}
if (err != null) {
StorPoolUtil.spLog("Failed due to %s", err);
log.error(err);
answer = new Answer(cmd, false, err);
}
CopyCommandResult res = new CopyCommandResult(null, answer);
res.setResult(err);
callback.complete(res);
}
@Override
public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {
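// Snapshots the StorPool volume backing the CloudStack snapshot and records the
// snapshot's device path and primary storage id in the snapshot details table.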
String snapshotName = snapshot.getUuid();
VolumeInfo vinfo = snapshot.getBaseVolume();
String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true);
Long vmId = vinfo.getInstanceId();
if (volumeName != null) {
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.takeSnapshot volumename=%s vmInstance=%s",volumeName, vmId);
} else {
throw new UnsupportedOperationException("The path should be: " + StorPoolUtil.SP_DEV_PATH);
}
CreateObjectAnswer answer = null;
String err = null;
try {
SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
SpApiResponse resp = StorPoolUtil.volumeSnapshot(volumeName, snapshotName, vmId != null ? getVMInstanceUUID(vmId) : null, "snapshot", null, conn);
if (resp.getError() != null) {
err = String.format("Could not snapshot StorPool volume %s. Error %s", volumeName, resp.getError());
answer = new CreateObjectAnswer(err);
} else {
String name = StorPoolUtil.getSnapshotNameFromResponse(resp, true, StorPoolUtil.GLOBAL_ID);
SnapshotObjectTO snapTo = (SnapshotObjectTO)snapshot.getTO();
snapTo.setPath(StorPoolUtil.devPath(name.split("~")[1]));
answer = new CreateObjectAnswer(snapTo);
StorPoolHelper.addSnapshotDetails(snapshot.getId(), snapshot.getUuid(), snapTo.getPath(), _snapshotDetailsDao);
//add primary storage of snapshot
StorPoolHelper.addSnapshotDetails(snapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(snapshot.getDataStore().getId()), _snapshotDetailsDao);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.takeSnapshot: snapshot: name=%s, uuid=%s, volume: name=%s, uuid=%s", name, snapshot.getUuid(), volumeName, vinfo.getUuid());
}
} catch (Exception e) {
err = String.format("Could not take volume snapshot due to %s", e.getMessage());
}
CreateCmdResult res = new CreateCmdResult(null, answer);
res.setResult(err);
callback.complete(res);
}
@Override
public void revertSnapshot(final SnapshotInfo snapshot, final SnapshotInfo snapshotOnPrimaryStore, final AsyncCompletionCallback<CommandResult> callback) {
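// A StorPool revert requires the volume to be detached, so all attachments are forcibly
// dropped before the revert is issued; the max IOPS limit is re-applied afterwards.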
final VolumeInfo vinfo = snapshot.getBaseVolume();
final String snapshotName = StorPoolHelper.getSnapshotName(snapshot.getId(), snapshot.getUuid(), snapshotDataStoreDao, _snapshotDetailsDao);
final String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.revertSnapshot: snapshot: name=%s, uuid=%s, volume: name=%s, uuid=%s", snapshotName, snapshot.getUuid(), volumeName, vinfo.getUuid());
String err = null;
SpConnectionDesc conn = null;
try {
conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
} catch (Exception e) {
err = String.format("Could not revert volume due to %s", e.getMessage());
completeResponse(err, callback);
return;
}
VolumeDetailVO detail = volumeDetailsDao.findDetail(vinfo.getId(), StorPoolUtil.SP_PROVIDER_NAME);
if (detail != null) {
//Rename volume to its global id only if it was migrated from UUID to global id
SpApiResponse updateVolumeResponse = StorPoolUtil.volumeUpdateRename(StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true), "", StorPoolStorageAdaptor.getVolumeNameFromPath(detail.getValue(), false), conn);
if (updateVolumeResponse.getError() != null) {
StorPoolUtil.spLog("Could not update StorPool's volume %s to it's globalId due to %s", StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true), updateVolumeResponse.getError().getDescr());
err = String.format("Could not update StorPool's volume %s to it's globalId due to %s", StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true), updateVolumeResponse.getError().getDescr());
completeResponse(err, callback);
return;
}
volumeDetailsDao.remove(detail.getId());
}
SpApiResponse resp = StorPoolUtil.detachAllForced(volumeName, false, conn);
if (resp.getError() != null) {
err = String.format("Could not detach StorPool volume %s due to %s", volumeName, resp.getError());
completeResponse(err, callback);
return;
}
SpApiResponse response = StorPoolUtil.volumeRevert(volumeName, snapshotName, conn);
if (response.getError() != null) {
err = String.format(
"Could not revert StorPool volume %s to the %s snapshot: could not create the new volume: error %s",
volumeName, snapshotName, response.getError());
completeResponse(err, callback);
return;
}
if (vinfo.getMaxIops() != null) {
response = StorPoolUtil.volumeUpadateTags(volumeName, null, vinfo.getMaxIops(), conn, null);
if (response.getError() != null) {
StorPoolUtil.spLog("Volume was reverted successfully but max iops could not be set due to %s", response.getError().getDescr());
}
}
final VolumeObjectTO to = (VolumeObjectTO)vinfo.getTO();
completeResponse(to, callback);
}
private String getVcPolicyTag(Long vmId) {
ResourceTag resourceTag = vmId != null ? _resourceTagDao.findByKey(vmId, ResourceObjectType.UserVm, StorPoolUtil.SP_VC_POLICY) : null;
return resourceTag != null ? resourceTag.getValue() : "";
}
public void handleQualityOfServiceForVolumeMigration(VolumeInfo arg0, QualityOfServiceState arg1) {
StorPoolUtil.spLog("handleQualityOfServiceForVolumeMigration with volume name=%s", arg0.getName());
}
public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
AsyncCompletionCallback<CopyCommandResult> callback) {
copyAsync(srcData, destData, callback);
}
public boolean canProvideStorageStats() {
return false;
}
public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
return null;
}
public boolean canProvideVolumeStats() {
return false;
}
public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
return null;
}
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return false;
}
@Override
public boolean isVmInfoNeeded() {
return true;
}
@Override
public void provideVmInfo(long vmId, long volumeId) {
VolumeVO volume = volumeDao.findById(volumeId);
StoragePoolVO poolVO = primaryStoreDao.findById(volume.getPoolId());
if (poolVO != null) {
try {
SpConnectionDesc conn = StorPoolUtil.getSpConnection(poolVO.getUuid(), poolVO.getId(), storagePoolDetailsDao, primaryStoreDao);
String volName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
VMInstanceVO userVM = vmInstanceDao.findById(vmId);
SpApiResponse resp = StorPoolUtil.volumeUpadateTags(volName, volume.getInstanceId() != null ? userVM.getUuid() : "", null, conn, getVcPolicyTag(vmId));
if (resp.getError() != null) {
log.warn(String.format("Could not update VC policy tags of a volume with id [%s]", volume.getUuid()));
}
} catch (Exception e) {
log.warn(String.format("Could not update Virtual machine tags due to %s", e.getMessage()));
}
}
}
@Override
public boolean isVmTagsNeeded(String tagKey) {
return tagKey != null && tagKey.equals(StorPoolUtil.SP_VC_POLICY);
}
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
VolumeVO volume = volumeDao.findById(volumeId);
StoragePoolVO poolVO = primaryStoreDao.findById(volume.getPoolId());
if (poolVO != null) {
try {
SpConnectionDesc conn = StorPoolUtil.getSpConnection(poolVO.getUuid(), poolVO.getId(), storagePoolDetailsDao, primaryStoreDao);
String volName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
SpApiResponse resp = StorPoolUtil.volumeUpadateVCTags(volName, conn, getVcPolicyTag(vmId));
if (resp.getError() != null) {
log.warn(String.format("Could not update VC policy tags of a volume with id [%s]", volume.getUuid()));
}
} catch (Exception e) {
log.warn(String.format("Could not update Virtual machine tags due to %s", e.getMessage()));
}
}
}
}

View File

@@ -0,0 +1,321 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.log4j.Logger;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.ScopeType;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.VMTemplateDetailVO;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplateDetailsDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.utils.exception.CloudRuntimeException;
public class StorPoolPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
private static final Logger log = Logger.getLogger(StorPoolPrimaryDataStoreLifeCycle.class);
@Inject
protected PrimaryDataStoreHelper dataStoreHelper;
@Inject
protected StoragePoolAutomation storagePoolAutomation;
@Inject
private PrimaryDataStoreDao _primaryDataStoreDao;
@Inject
private ResourceManager resourceMgr;
@Inject
private StorageManager storageMgr;
@Inject
private SnapshotDao snapshotDao;
@Inject
private SnapshotDetailsDao snapshotDetailsDao;
@Inject
private VMTemplatePoolDao vmTemplatePoolDao;
@Inject
private VMTemplateDetailsDao vmTemplateDetailsDao;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
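// Parses the StorPool connection details from the URL, validates the template and
// credentials, and rejects a pool that would reuse the cluster/template pair of an
// existing StorPool primary storage.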
StorPoolUtil.spLog("initialize:");
for (Map.Entry<String, Object> e: dsInfos.entrySet()) {
StorPoolUtil.spLog(" %s=%s", e.getKey(), e.getValue());
}
StorPoolUtil.spLog("");
log.debug("initialize");
String name = (String)dsInfos.get("name");
String providerName = (String)dsInfos.get("providerName");
Long zoneId = (Long)dsInfos.get("zoneId");
String url = (String)dsInfos.get("url");
SpConnectionDesc conn = new SpConnectionDesc(url);
if (conn.getHostPort() == null) {
throw new IllegalArgumentException("No SP_API_HTTP");
}
if (conn.getAuthToken() == null) {
throw new IllegalArgumentException("No SP_AUTH_TOKEN");
}
if (conn.getTemplateName() == null) {
throw new IllegalArgumentException("No SP_TEMPLATE");
}
if (!StorPoolUtil.templateExists(conn)) {
throw new IllegalArgumentException("No such storpool template " + conn.getTemplateName() + " or credentials are invalid");
}
for (StoragePoolVO sp : _primaryDataStoreDao.findPoolsByProvider("StorPool")) {
List<StoragePoolDetailVO> spDetails = storagePoolDetailsDao.listDetails(sp.getId());
String host = null;
String template = null;
String authToken = null;
SpConnectionDesc old = null;
for (StoragePoolDetailVO storagePoolDetailVO : spDetails) {
switch (storagePoolDetailVO.getName()) {
case StorPoolUtil.SP_AUTH_TOKEN:
authToken = storagePoolDetailVO.getValue();
break;
case StorPoolUtil.SP_HOST_PORT:
host = storagePoolDetailVO.getValue();
break;
case StorPoolUtil.SP_TEMPLATE:
template = storagePoolDetailVO.getValue();
break;
default:
break;
}
}
if (host != null && template != null && authToken != null) {
old = new SpConnectionDesc(host, authToken, template);
} else {
old = new SpConnectionDesc(sp.getUuid());
}
if (old.getHostPort().equals(conn.getHostPort()) && old.getTemplateName().equals(conn.getTemplateName())) {
throw new IllegalArgumentException("StorPool cluster and template already in use by pool " + sp.getName());
}
}
Long capacityBytes = (Long)dsInfos.get("capacityBytes");
if (capacityBytes == null) {
throw new IllegalArgumentException("Capcity bytes is required");
}
String tags = (String)dsInfos.get("tags");
if (tags == null || tags.isEmpty()) {
tags = name;
}
@SuppressWarnings("unchecked")
Map<String, String> details = (Map<String, String>)dsInfos.get("details");
details.put(StorPoolUtil.SP_AUTH_TOKEN, conn.getAuthToken());
details.put(StorPoolUtil.SP_HOST_PORT, conn.getHostPort());
details.put(StorPoolUtil.SP_TEMPLATE, conn.getTemplateName());
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
parameters.setName(name);
parameters.setUuid(conn.getTemplateName() + ";" + UUID.randomUUID().toString());
parameters.setZoneId(zoneId);
parameters.setProviderName(providerName);
parameters.setType(StoragePoolType.SharedMountPoint);
parameters.setHypervisorType(HypervisorType.KVM);
parameters.setManaged(false);
parameters.setHost("n/a");
parameters.setPort(0);
parameters.setPath(StorPoolUtil.SP_DEV_PATH);
parameters.setUsedBytes(0);
parameters.setCapacityBytes(capacityBytes);
parameters.setTags(tags);
parameters.setDetails(details);
return dataStoreHelper.createPrimaryDataStore(parameters);
}
@Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
StorPoolUtil.spLog("updateStoragePool:");
for (Map.Entry<String, String> e: details.entrySet()) {
StorPoolUtil.spLog(" %s=%s", e.getKey(), e.getValue());
}
StorPoolUtil.spLog("");
log.debug("updateStoragePool");
}
@Override
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
log.debug("attachHost");
return true;
}
@Override
public boolean attachCluster(DataStore store, ClusterScope scope) {
log.debug("attachCluster");
if (!scope.getScopeType().equals(ScopeType.ZONE)) {
throw new UnsupportedOperationException("Only Zone-Wide scope is supported!");
}
return true;
}
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
log.debug("attachZone");
if (hypervisorType != HypervisorType.KVM) {
throw new UnsupportedOperationException("Only KVM hypervisors supported!");
}
List<HostVO> kvmHosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
for (HostVO host : kvmHosts) {
try {
storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
} catch (Exception e) {
log.warn(String.format("Unable to establish a connection between host %s and pool %s due to %s", host, dataStore, e));
}
}
dataStoreHelper.attachZone(dataStore, hypervisorType);
return true;
}
@Override
public boolean maintain(DataStore dataStore) {
log.debug("maintain");
storagePoolAutomation.maintain(dataStore);
dataStoreHelper.maintain(dataStore);
return true;
}
@Override
public boolean cancelMaintain(DataStore store) {
log.debug("cancelMaintain");
dataStoreHelper.cancelMaintain(store);
storagePoolAutomation.cancelMaintain(store);
return true;
}
@Override
public boolean deleteDataStore(DataStore store) {
log.debug("deleteDataStore");
long storagePoolId = store.getId();
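// Refuse to delete the pool while any snapshot or template snapshot still references it.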
List<SnapshotVO> lstSnapshots = snapshotDao.listAll();
if (lstSnapshots != null) {
for (SnapshotVO snapshot : lstSnapshots) {
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID);
// if this snapshot belongs to the storagePool that was passed in
if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == storagePoolId) {
throw new CloudRuntimeException("This primary storage cannot be deleted because it currently contains one or more snapshots.");
}
}
}
List<VMTemplateDetailVO> lstTemplateDetails = vmTemplateDetailsDao.listAll();
if (lstTemplateDetails != null) {
for (VMTemplateDetailVO vmTemplateDetailVO : lstTemplateDetails) {
if (vmTemplateDetailVO.getName().equals(StorPoolUtil.SP_STORAGE_POOL_ID) && Long.parseLong(vmTemplateDetailVO.getValue()) == storagePoolId) {
throw new CloudRuntimeException("This primary storage cannot be deleted because it currently contains one or more template snapshots.");
}
}
}
List<VMTemplateStoragePoolVO> lstTemplatePoolRefs = vmTemplatePoolDao.listByPoolId(storagePoolId);
SpConnectionDesc conn = StorPoolUtil.getSpConnection(store.getUuid(), store.getId(), storagePoolDetailsDao, _primaryDataStoreDao);
if (lstTemplatePoolRefs != null) {
for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) {
SpApiResponse resp = StorPoolUtil.snapshotDelete(
StorPoolStorageAdaptor.getVolumeNameFromPath(templatePoolRef.getLocalDownloadPath(), true), conn);
if (resp.getError() != null) {
throw new CloudRuntimeException(String.format("Could not delete StorPool's snapshot from template_spool_ref table due to %s", resp.getError()));
}
vmTemplatePoolDao.remove(templatePoolRef.getId());
}
}
boolean isDeleted = dataStoreHelper.deletePrimaryDataStore(store);
if (isDeleted) {
List<StoragePoolDetailVO> volumesOnHosts = storagePoolDetailsDao.listDetails(storagePoolId);
for (StoragePoolDetailVO storagePoolDetailVO : volumesOnHosts) {
if (storagePoolDetailVO.getValue() != null && storagePoolDetailVO.getName().contains(StorPoolUtil.SP_VOLUME_ON_CLUSTER)) {
StorPoolUtil.volumeDelete(StorPoolStorageAdaptor.getVolumeNameFromPath(storagePoolDetailVO.getValue(), true), conn);
}
}
storagePoolDetailsDao.removeDetails(storagePoolId);
}
return isDeleted;
}
@Override
public boolean migrateToObjectStore(DataStore store) {
log.debug("migrateToObjectStore");
return false;
}
@Override
public void enableStoragePool(DataStore dataStore) {
log.debug("enableStoragePool");
dataStoreHelper.enable(dataStore);
}
@Override
public void disableStoragePool(DataStore dataStore) {
log.debug("disableStoragePool");
dataStoreHelper.disable(dataStore);
}
}

View File

@@ -0,0 +1,234 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.provider;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.StorPoolFeaturesAndFixes;
import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
import org.apache.log4j.Logger;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.StorPoolModifyStoragePoolAnswer;
import com.cloud.agent.api.storage.StorPoolModifyStoragePoolCommand;
import com.cloud.agent.manager.AgentAttache;
import com.cloud.alert.AlertManager;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.StorageConflictException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
public class StorPoolHostListener implements HypervisorHostListener {
private static final Logger log = Logger.getLogger(StorPoolHostListener.class);
@Inject
private AgentManager agentMgr;
@Inject
private DataStoreManager dataStoreMgr;
@Inject
private AlertManager alertMgr;
@Inject
private StoragePoolHostDao storagePoolHostDao;
@Inject
private PrimaryDataStoreDao primaryStoreDao;
@Inject
private HostDao hostDao;
@Inject
private ClusterDao clusterDao;
@Inject
private ClusterDetailsDao clusterDetailsDao;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
@Override
public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
// Updates the storage pool's connection details in the DB, if needed, before connecting the pool to the host
StoragePoolVO poolVO = primaryStoreDao.findById(poolId);
SpConnectionDesc conn = null;
try {
conn = StorPoolUtil.getSpConnection(poolVO.getUuid(), poolId, storagePoolDetailsDao, primaryStoreDao);
} catch (Exception e) {
return false;
}
StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
HostVO host = hostDao.findById(hostId);
StoragePoolDetailVO volumeOnPool = verifyVolumeIsOnCluster(poolId, conn, host.getClusterId());
if (volumeOnPool == null) {
return false;
}
if (host.isInMaintenanceStates()) {
addModifyCommandToCommandsAllowedInMaintenanceMode();
}
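// Refuse to connect the pool if the current driver build lacks functionality that the
// pool recorded before a CloudStack upgrade (see the feature bookkeeping in StorPoolHelper).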
List<String> driverSupportedFeatures = StorPoolFeaturesAndFixes.getAllClassConstants();
List<StoragePoolDetailVO> driverFeaturesBeforeUpgrade = StorPoolHelper.listFeaturesUpdates(storagePoolDetailsDao, poolId);
boolean isCurrentVersionSupportsEverythingFromPrevious = StorPoolHelper.isPoolSupportsAllFunctionalityFromPreviousVersion(storagePoolDetailsDao, driverSupportedFeatures, driverFeaturesBeforeUpgrade, poolId);
if (!isCurrentVersionSupportsEverythingFromPrevious) {
String msg = "The current StorPool driver does not support all functionality from the one before upgrade to CS";
StorPoolUtil.spLog("Storage pool [%s] is not connected to host [%s] because the functionality after the upgrade is not full",
poolId, hostId);
alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg);
return false;
}
StorPoolModifyStoragePoolCommand cmd = new StorPoolModifyStoragePoolCommand(true, pool, volumeOnPool.getValue());
final Answer answer = agentMgr.easySend(hostId, cmd);
StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(pool.getId(), hostId);
if (answer == null) {
throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId());
}
if (!answer.getResult()) {
if (answer.getDetails() != null) {
if (answer.getDetails().equals("objectDoesNotExist")) {
StorPoolUtil.volumeDelete(StorPoolStorageAdaptor.getVolumeNameFromPath(volumeOnPool.getValue(), true), conn);
storagePoolDetailsDao.remove(volumeOnPool.getId());
return false;
} else if (answer.getDetails().equals("spNotFound")) {
return false;
}
}
String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId;
alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg);
throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() +
pool.getId());
}
StorPoolUtil.spLog("hostConnect: hostId=%d, poolId=%d", hostId, poolId);
StorPoolModifyStoragePoolAnswer mspAnswer = (StorPoolModifyStoragePoolAnswer)answer;
if (mspAnswer.getLocalDatastoreName() != null && pool.isShared()) {
String datastoreName = mspAnswer.getLocalDatastoreName();
List<StoragePoolVO> localStoragePools = primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName);
for (StoragePoolVO localStoragePool : localStoragePools) {
if (datastoreName.equals(localStoragePool.getPath())) {
log.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName());
throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:"
+ localStoragePool.getName());
}
}
}
if (poolHost == null) {
poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
storagePoolHostDao.persist(poolHost);
} else {
poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
}
StorPoolHelper.setSpClusterIdIfNeeded(hostId, mspAnswer.getClusterId(), clusterDao, hostDao, clusterDetailsDao);
log.info("Connection established between storage pool " + pool + " and host " + hostId);
return true;
}
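// A minimal proxy volume (size 512, tagged cs=check-volume-is-on-host) is created per cluster by
// StorPoolUtil.volumeCreate(conn); its /dev path is stored as the SP_VOLUME_ON_CLUSTER-<clusterId>
// pool detail and attached via StorPoolModifyStoragePoolCommand to verify the host can reach the pool.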
private synchronized StoragePoolDetailVO verifyVolumeIsOnCluster(long poolId, SpConnectionDesc conn, long clusterId) {
StoragePoolDetailVO volumeOnPool = storagePoolDetailsDao.findDetail(poolId, StorPoolUtil.SP_VOLUME_ON_CLUSTER + "-" + clusterId);
if (volumeOnPool == null) {
SpApiResponse resp = StorPoolUtil.volumeCreate(conn);
if (resp.getError() != null) {
return null;
}
String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
volumeOnPool = new StoragePoolDetailVO(poolId, StorPoolUtil.SP_VOLUME_ON_CLUSTER + "-" + clusterId, StorPoolUtil.devPath(volumeName), false);
storagePoolDetailsDao.persist(volumeOnPool);
}
return volumeOnPool;
}
@Override
public boolean hostAdded(long hostId) {
return true;
}
@Override
public boolean hostDisconnected(long hostId, long poolId) {
StorPoolUtil.spLog("hostDisconnected: hostId=%d, poolId=%d", hostId, poolId);
return true;
}
@Override
public boolean hostAboutToBeRemoved(long hostId) {
return true;
}
@Override
public boolean hostRemoved(long hostId, long clusterId) {
return true;
}
// Workaround: use reflection to add StorPoolModifyStoragePoolCommand to
// AgentAttache.s_commandsAllowedInMaintenanceMode, the list of commands a host may still execute while in maintenance mode
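// NOTE: this relies on reflective access to Field.modifiers, which JDK 12+ filters out;
// on such JDKs the catch block below only logs a warning and the command stays disallowed in maintenance mode.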
private void addModifyCommandToCommandsAllowedInMaintenanceMode() {
Class<AgentAttache> cls = AgentAttache.class;
try {
Field field = cls.getDeclaredField("s_commandsAllowedInMaintenanceMode");
field.setAccessible(true);
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
List<String> allowedCmdsInMaintenance = new ArrayList<String>(Arrays.asList(AgentAttache.s_commandsAllowedInMaintenanceMode));
allowedCmdsInMaintenance.add(StorPoolModifyStoragePoolCommand.class.toString());
String[] allowedCmdsInMaintenanceNew = new String[allowedCmdsInMaintenance.size()];
allowedCmdsInMaintenance.toArray(allowedCmdsInMaintenanceNew);
Arrays.sort(allowedCmdsInMaintenanceNew);
field.set(null, allowedCmdsInMaintenanceNew);
} catch (IllegalArgumentException | IllegalAccessException | NoSuchFieldException | SecurityException e) {
String err = "Could not add StorPoolModifyStoragePoolCommand to s_commandsAllowedInMaintenanceMode array due to: %s";
StorPoolUtil.spLog(err, e.getMessage());
log.warn(String.format(err, e.getMessage()));
}
}
@Override
public boolean hostEnabled(long hostId) {
return true;
}
}


@ -0,0 +1,78 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.provider;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.datastore.driver.StorPoolPrimaryDataStoreDriver;
import org.apache.cloudstack.storage.datastore.lifecycle.StorPoolPrimaryDataStoreLifeCycle;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import com.cloud.utils.component.ComponentContext;
public class StorPoolPrimaryDataStoreProvider implements PrimaryDataStoreProvider {
protected DataStoreLifeCycle lifecycle;
protected DataStoreDriver driver;
protected HypervisorHostListener listener;
StorPoolPrimaryDataStoreProvider() {
}
@Override
public String getName() {
return StorPoolUtil.SP_PROVIDER_NAME;
}
@Override
public DataStoreLifeCycle getDataStoreLifeCycle() {
return lifecycle;
}
@Override
public DataStoreDriver getDataStoreDriver() {
return driver;
}
@Override
public HypervisorHostListener getHostListener() {
return listener;
}
@Override
public boolean configure(Map<String, Object> params) {
lifecycle = ComponentContext.inject(StorPoolPrimaryDataStoreLifeCycle.class);
driver = ComponentContext.inject(StorPoolPrimaryDataStoreDriver.class);
listener = ComponentContext.inject(StorPoolHostListener.class);
return true;
}
@Override
public Set<DataStoreProviderType> getTypes() {
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
types.add(DataStoreProviderType.PRIMARY);
return types;
}
}


@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.util;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
public class StorPoolFeaturesAndFixes {
public static List<String> getAllClassConstants() {
List<String> constants = new ArrayList<>();
for (Field field : StorPoolFeaturesAndFixes.class.getDeclaredFields()) {
int modifiers = field.getModifiers();
if (Modifier.isStatic(modifiers) && Modifier.isFinal(modifiers)) {
constants.add(field.getName());
}
}
return constants;
}
}
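
For context, StorPoolFeaturesAndFixes is a registry: every static final field it declares is reported by getAllClassConstants() and recorded per pool as a storage-pool detail on host connect. A minimal, runnable sketch of the upgrade check performed by StorPoolHelper.isPoolSupportsAllFunctionalityFromPreviousVersion (class and feature names below are hypothetical):

import java.util.Arrays;
import java.util.List;

public class FeatureCheckSketch {
    public static void main(String[] args) {
        // what the installed driver supports now vs. what was recorded before the upgrade
        List<String> currentPluginFeatures = Arrays.asList("SP_FEATURE_A", "SP_FEATURE_B");
        List<String> featuresBeforeUpgrade = Arrays.asList("SP_FEATURE_A");
        // all previously recorded features are still present -> safe to connect;
        // SP_FEATURE_B would be persisted as a new detail so a later downgrade
        // that drops it is caught on the next host connect
        boolean safe = currentPluginFeatures.containsAll(featuresBeforeUpgrade);
        System.out.println("upgrade keeps all previous functionality: " + safe);
    }
}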


@ -0,0 +1,298 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.util;
import java.io.IOException;
import java.sql.PreparedStatement;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.log4j.Appender;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.server.ResourceTag;
import com.cloud.server.ResourceTag.ResourceObjectType;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
public class StorPoolHelper {
private static final String UPDATE_SNAPSHOT_DETAILS_VALUE = "UPDATE `cloud`.`snapshot_details` SET value=? WHERE id=?";
private static final String UPDATE_VOLUME_DETAILS_NAME = "UPDATE `cloud`.`volume_details` SET name=? WHERE id=?";
public static final String PrimaryStorageDownloadWait = "primary.storage.download.wait";
public static final String CopyVolumeWait = "copy.volume.wait";
public static final String BackupSnapshotWait = "backup.snapshot.wait";
public static void updateVolumeInfo(VolumeObjectTO volumeObjectTO, Long size, SpApiResponse resp,
VolumeDao volumeDao) {
String volumePath = StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false));
VolumeVO volume = volumeDao.findById(volumeObjectTO.getId());
if (volume != null) {
volumeObjectTO.setSize(size);
volumeObjectTO.setPath(volumePath);
volume.setSize(size);
volume.setPath(volumePath);
volumeDao.update(volumeObjectTO.getId(), volume);
}
}
// When a volume is deleted, CloudStack only removes the database records of the snapshots taken on primary storage.
// That's why we keep a record of every snapshot created on StorPool in the snapshot_details table, so we can still operate on them
public static void addSnapshotDetails(final Long id, final String uuid, final String snapshotName,
SnapshotDetailsDao snapshotDetailsDao) {
SnapshotDetailsVO details = new SnapshotDetailsVO(id, uuid, snapshotName, false);
snapshotDetailsDao.persist(details);
}
public static String getSnapshotName(Long snapshotId, String snapshotUuid, SnapshotDataStoreDao snapshotStoreDao,
SnapshotDetailsDao snapshotDetailsDao) {
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshotId, snapshotUuid);
if (snapshotDetails != null) {
return StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDetails.getValue(), true);
} else {
List<SnapshotDataStoreVO> snapshots = snapshotStoreDao.findBySnapshotId(snapshotId);
if (!CollectionUtils.isEmpty(snapshots)) {
for (SnapshotDataStoreVO snapshotDataStoreVO : snapshots) {
String name = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDataStoreVO.getInstallPath(), true);
if (name == null) {
continue;
} else {
addSnapshotDetails(snapshotId, snapshotUuid, snapshotDataStoreVO.getInstallPath(), snapshotDetailsDao);
return name;
}
}
}
}
return null;
}
public static void updateSnapshotDetailsValue(Long id, String valueOrName, String snapshotOrVolume) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
PreparedStatement pstmt = null;
try {
String sql = null;
if (snapshotOrVolume.equals("snapshot")) {
sql = UPDATE_SNAPSHOT_DETAILS_VALUE;
} else if (snapshotOrVolume.equals("volume")) {
sql = UPDATE_VOLUME_DETAILS_NAME;
} else {
StorPoolUtil.spLog("Could not update snapshot detail with id=%s", id);
}
if (sql != null) {
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setString(1, valueOrName);
pstmt.setLong(2, id);
pstmt.executeUpdate();
txn.commit();
}
} catch (Exception e) {
txn.rollback();
StorPoolUtil.spLog("Could not update snapshot detail with id=%s", id);
}
}
public static String getVcPolicyTag(Long vmId, ResourceTagDao resourceTagDao) {
if (vmId != null) {
ResourceTag tag = resourceTagDao.findByKey(vmId, ResourceObjectType.UserVm, StorPoolUtil.SP_VC_POLICY);
if (tag != null) {
return tag.getValue();
}
}
return null;
}
public static String getVMInstanceUUID(Long id, VMInstanceDao vmInstanceDao) {
if (id != null) {
VMInstanceVO vmInstance = vmInstanceDao.findById(id);
if (vmInstance != null) {
return vmInstance.getUuid();
}
}
return null;
}
public static Map<String, String> addStorPoolTags(String name, String vmUuid, String csTag, String vcPolicy) {
Map<String, String> tags = new HashMap<>();
tags.put("uuid", name);
tags.put("cvm", vmUuid);
tags.put(StorPoolUtil.SP_VC_POLICY, vcPolicy);
if (csTag != null) {
tags.put("cs", csTag);
}
return tags;
}
// Initialize a custom logger for updated volumes and snapshots
public static void appendLogger(Logger log, String filePath, String kindOfLog) {
Appender appender = null;
PatternLayout patternLayout = new PatternLayout();
patternLayout.setConversionPattern("%d{yyyy-MM-dd HH:mm:ss.SSS} %m%n");
SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
Timestamp timestamp = new Timestamp(System.currentTimeMillis());
String path = filePath + "-" + sdf.format(timestamp) + ".log";
try {
appender = new RollingFileAppender(patternLayout, path);
log.setAdditivity(false);
log.addAppender(appender);
} catch (IOException e) {
log.error("Could not create log appender for " + path, e);
}
if (kindOfLog.equals("update")) {
StorPoolUtil.spLog(
"Information about the volumes and snapshots whose global IDs will be updated in the database is written to the %s log file",
path);
} else if (kindOfLog.equals("abandon")) {
StorPoolUtil.spLog(
"Information about the volumes and snapshots for which CloudStack has no records is written to the %s log file",
path);
}
}
public static void setSpClusterIdIfNeeded(long hostId, String clusterId, ClusterDao clusterDao, HostDao hostDao,
ClusterDetailsDao clusterDetails) {
HostVO host = hostDao.findById(hostId);
if (host != null && host.getClusterId() != null) {
ClusterVO cluster = clusterDao.findById(host.getClusterId());
ClusterDetailsVO clusterDetailsVo = clusterDetails.findDetail(cluster.getId(),
StorPoolConfigurationManager.StorPoolClusterId.key());
if (clusterDetailsVo == null) {
clusterDetails.persist(
new ClusterDetailsVO(cluster.getId(), StorPoolConfigurationManager.StorPoolClusterId.key(), clusterId));
} else if (clusterDetailsVo.getValue() == null || !clusterDetailsVo.getValue().equals(clusterId)) {
clusterDetailsVo.setValue(clusterId);
clusterDetails.update(clusterDetailsVo.getId(), clusterDetailsVo);
}
}
}
public static Long findClusterIdByGlobalId(String globalId, ClusterDao clusterDao) {
List<ClusterVO> clusterVo = clusterDao.listAll();
if (clusterVo.size() == 1) {
StorPoolUtil.spLog("There is only one cluster, sending backup to secondary command");
return null;
}
for (ClusterVO clusterVO2 : clusterVo) {
if (globalId != null && StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterVO2.getId()) != null
&& globalId.contains(StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterVO2.getId()).toString())) {
StorPoolUtil.spLog("Found cluster with id=%s for object with globalId=%s", clusterVO2.getId(),
globalId);
return clusterVO2.getId();
}
}
throw new CloudRuntimeException(
"Could not find the right cluster ID to send the command to. To use snapshot backup to secondary storage, set StorPool's cluster-id in the \"sp.cluster.id\" setting of each CloudStack cluster.");
}
public static HostVO findHostByCluster(Long clusterId, HostDao hostDao) {
List<HostVO> host = hostDao.findByClusterId(clusterId);
return CollectionUtils.isNotEmpty(host) ? host.get(0) : null;
}
public static int getTimeout(String cfg, ConfigurationDao configDao) {
final ConfigurationVO value = configDao.findByName(cfg);
return NumbersUtil.parseInt(value.getValue(), Integer.parseInt(value.getDefaultValue()));
}
public static VMTemplateStoragePoolVO findByPoolTemplate(long poolId, long templateId) {
QueryBuilder<VMTemplateStoragePoolVO> sc = QueryBuilder.create(VMTemplateStoragePoolVO.class);
sc.and(sc.entity().getPoolId(), Op.EQ, poolId);
sc.and(sc.entity().getTemplateId(), Op.EQ, templateId);
return sc.find();
}
public static void updateVmStoreTemplate(Long id, DataStoreRole role, String path,
TemplateDataStoreDao templStoreDao) {
TemplateDataStoreVO templ = templStoreDao.findByTemplate(id, role);
templ.setLocalDownloadPath(path);
templStoreDao.persist(templ);
}
public static List<StoragePoolDetailVO> listFeaturesUpdates(StoragePoolDetailsDao storagePoolDetails, long poolId) {
SearchBuilder<StoragePoolDetailVO> sb = storagePoolDetails.createSearchBuilder();
sb.and("pool_id", sb.entity().getResourceId(), SearchCriteria.Op.EQ);
sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);
SearchCriteria<StoragePoolDetailVO> sc = sb.create();
sc.setParameters("pool_id", poolId);
sc.setParameters("name", "SP-FEATURE" + "%");
return storagePoolDetails.search(sc, null);
}
public static boolean isPoolSupportsAllFunctionalityFromPreviousVersion(StoragePoolDetailsDao storagePoolDetails, List<String> currentPluginFeatures, List<StoragePoolDetailVO> poolFeaturesBeforeUpgrade, long poolId) {
if (CollectionUtils.isEmpty(currentPluginFeatures) && CollectionUtils.isEmpty(poolFeaturesBeforeUpgrade)) {
return true;
}
List<String> poolDetails = poolFeaturesBeforeUpgrade.stream().map(StoragePoolDetailVO::getName).collect(Collectors.toList());
List<String> detailsNotContainedInCurrent = new ArrayList<>(CollectionUtils.removeAll(poolDetails, currentPluginFeatures));
List<String> detailsNotContainedInDataBase = new ArrayList<>(CollectionUtils.removeAll(currentPluginFeatures, poolDetails));
if (!CollectionUtils.isEmpty(detailsNotContainedInCurrent)) {
return false;
} else if (!CollectionUtils.isEmpty(detailsNotContainedInDataBase)) {
for (String features : detailsNotContainedInDataBase) {
StoragePoolDetailVO storageNewFeatures = new StoragePoolDetailVO(poolId, features, features, false);
storagePoolDetails.persist(storageNewFeatures);
}
return true;
}
return true;
}
}
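
For reference, a short sketch of the tag map that StorPoolHelper.addStorPoolTags() builds for a StorPool volume or snapshot (all values below are hypothetical; the fragment assumes the plugin classes are on the classpath):

Map<String, String> tags = StorPoolHelper.addStorPoolTags(
        "eac8326d-0d4e-4b58-a88d-2f019e9fd9a8", // -> "uuid" tag: the CloudStack volume/snapshot uuid
        "6f2b4c58-1d3a-4e8f-9c7b-0a1b2c3d4e5f", // -> "cvm" tag: the attached VM instance uuid
        "volume",                               // -> "cs" tag: the CloudStack object kind (omitted when null)
        "tier1");                               // -> "vc-policy" tag: the VM's vc-policy resource tag
// result: {uuid=eac8..., cvm=6f2b..., cs=volume, vc-policy=tier1}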


@ -0,0 +1,609 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;
public class StorPoolUtil {
private static final Logger log = Logger.getLogger(StorPoolUtil.class);
private static final File spLogFile = new File("/var/log/cloudstack/management/storpool-plugin.log");
private static PrintWriter spLogPrinterWriter = spLogFileInitialize();
private static PrintWriter spLogFileInitialize() {
try {
log.info("INITIALIZE SP-LOG_FILE");
if (spLogFile.exists()) {
final SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
final Timestamp timestamp = new Timestamp(System.currentTimeMillis());
final File spLogFileRename = new File(spLogFile + "-" + sdf.format(timestamp));
final boolean ret = spLogFile.renameTo(spLogFileRename);
if (!ret) {
log.warn("Unable to rename" + spLogFile + " to " + spLogFileRename);
} else {
log.debug("Renamed " + spLogFile + " to " + spLogFileRename);
}
} else {
spLogFile.getParentFile().mkdirs();
}
return new PrintWriter(spLogFile);
} catch (Exception e) {
log.info("INITIALIZE SP-LOG_FILE: " + e.getMessage());
throw new RuntimeException(e);
}
}
public static void spLog(String fmt, Object... args) {
String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS").format(Calendar.getInstance().getTime());
spLogPrinterWriter.println(String.format(timeStamp + " " + fmt, args));
spLogPrinterWriter.flush();
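// rotate the log by re-initializing the writer once the file exceeds 100 GiB (107374182400 bytes)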
if (spLogFile.length() > 107374182400L) {
spLogPrinterWriter.close();
spLogPrinterWriter = spLogFileInitialize();
}
}
public static final String SP_PROVIDER_NAME = "StorPool";
public static final String SP_DEV_PATH = "/dev/storpool-byid/";
public static final String SP_OLD_PATH = "/dev/storpool/";
public static final String SP_VC_POLICY = "vc-policy";
public static final String GLOBAL_ID = "snapshotGlobalId";
public static final String UPDATED_DETAIL = "renamed";
public static final String SP_STORAGE_POOL_ID = "spStoragePoolId";
public static final String SP_HOST_PORT = "SP_API_HTTP_HOST";
public static final String SP_TEMPLATE = "SP_TEMPLATE";
public static final String SP_AUTH_TOKEN = "SP_AUTH_TOKEN";
public static final String SP_VOLUME_ON_CLUSTER = "SP_VOLUME_ON_CLUSTER";
public static enum StorpoolRights {
RO("ro"), RW("rw"), DETACH("detach");
private final String name;
private StorpoolRights(String name) {
this.name = name;
}
public String toString() {
return name;
}
}
public static final class SpApiError {
private String name;
private String descr;
public SpApiError() {
}
public String getName() {
return this.name;
}
public String getDescr() {
return this.descr;
}
public void setName(String name) {
this.name = name;
}
public void setDescr(String descr) {
this.descr = descr;
}
public String toString() {
return String.format("%s: %s", name, descr);
}
}
public static class SpConnectionDesc {
private String hostPort;
private String authToken;
private String templateName;
public SpConnectionDesc(String url) {
String[] urlSplit = url.split(";");
if (urlSplit.length == 1 && !urlSplit[0].contains("=")) {
this.templateName = url;
Script sc = new Script("storpool_confget", 0, log);
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
final String err = sc.execute(parser);
if (err != null) {
final String errMsg = String.format("Could not execute storpool_confget. Error: %s", err);
log.warn(errMsg);
throw new CloudRuntimeException(errMsg);
}
String SP_API_HOST = null;
String SP_API_PORT = null;
for (String line : parser.getLines().split("\n")) {
String[] toks = line.split("=");
if (toks.length != 2) {
continue;
}
switch (toks[0]) {
case "SP_API_HTTP_HOST":
SP_API_HOST = toks[1];
break;
case "SP_API_HTTP_PORT":
SP_API_PORT = toks[1];
break;
case "SP_AUTH_TOKEN":
this.authToken = toks[1];
break;
}
}
if (SP_API_HOST == null)
throw new CloudRuntimeException("Invalid StorPool config. Missing SP_API_HTTP_HOST");
if (SP_API_PORT == null)
throw new CloudRuntimeException("Invalid StorPool config. Missing SP_API_HTTP_PORT");
if (this.authToken == null)
throw new CloudRuntimeException("Invalid StorPool config. Missing SP_AUTH_TOKEN");
this.hostPort = SP_API_HOST + ":" + SP_API_PORT;
} else {
for (String kv : urlSplit) {
String[] toks = kv.split("=");
if (toks.length != 2)
continue;
switch (toks[0]) {
case "SP_API_HTTP":
this.hostPort = toks[1];
break;
case "SP_AUTH_TOKEN":
this.authToken = toks[1];
break;
case "SP_TEMPLATE":
this.templateName = toks[1];
break;
}
}
}
}
public SpConnectionDesc(String host, String authToken, String templateName) {
this.hostPort = host;
this.authToken = authToken;
this.templateName = templateName;
}
public String getHostPort() {
return this.hostPort;
}
public String getAuthToken() {
return this.authToken;
}
public String getTemplateName() {
return this.templateName;
}
}
public static SpConnectionDesc getSpConnection(String url, long poolId, StoragePoolDetailsDao poolDetails,
PrimaryDataStoreDao storagePool) {
boolean isAlternateEndpointEnabled = StorPoolConfigurationManager.AlternativeEndPointEnabled.valueIn(poolId);
if (isAlternateEndpointEnabled) {
String alternateEndpoint = StorPoolConfigurationManager.AlternativeEndpoint.valueIn(poolId);
if (StringUtils.isNotEmpty(alternateEndpoint)) {
return new SpConnectionDesc(alternateEndpoint);
} else {
throw new CloudRuntimeException(String.format("Using an alternative endpoint of StorPool primary storage with id [%s] is enabled but no endpoint URL is provided", poolId));
}
}
List<StoragePoolDetailVO> details = poolDetails.listDetails(poolId);
String host = null;
String authToken = null;
String templateName = null;
for (StoragePoolDetailVO storagePoolDetailVO : details) {
switch (storagePoolDetailVO.getName()) {
case SP_HOST_PORT:
host = storagePoolDetailVO.getValue();
break;
case SP_AUTH_TOKEN:
authToken = storagePoolDetailVO.getValue();
break;
case SP_TEMPLATE:
templateName = storagePoolDetailVO.getValue();
break;
}
}
if (host != null && authToken != null && templateName != null) {
return new SpConnectionDesc(host, authToken, templateName);
} else {
return updateStorageAndStorageDetails(url, poolId, poolDetails, storagePool);
}
}
private static SpConnectionDesc updateStorageAndStorageDetails(String url, long poolId,
StoragePoolDetailsDao poolDetails, PrimaryDataStoreDao storagePool) {
SpConnectionDesc conn = new SpConnectionDesc(url);
poolDetails.persist(new StoragePoolDetailVO(poolId, SP_HOST_PORT, conn.getHostPort(), false));
poolDetails.persist(new StoragePoolDetailVO(poolId, SP_AUTH_TOKEN, conn.getAuthToken(), false));
poolDetails.persist(new StoragePoolDetailVO(poolId, SP_TEMPLATE, conn.getTemplateName(), false));
StoragePoolVO pool = storagePool.findById(poolId);
pool.setUuid(conn.getTemplateName() + ";" + UUID.randomUUID().toString());
storagePool.update(poolId, pool);
StorPoolUtil.spLog(
"Storage pool with id=%s and template's name=%s was updated and its connection details are hidden from UI.",
pool.getId(), conn.getTemplateName());
return conn;
}
public static class SpApiResponse {
private SpApiError error;
public JsonElement fullJson;
public SpApiResponse() {
}
public SpApiError getError() {
return this.error;
}
public void setError(SpApiError error) {
this.error = error;
}
}
public static String devPath(final String name) {
return String.format("%s%s", SP_DEV_PATH, name);
}
private static SpApiResponse spApiRequest(HttpRequestBase req, String query, SpConnectionDesc conn) {
if (conn == null)
conn = new SpConnectionDesc("");
if (conn.getHostPort() == null) {
throw new CloudRuntimeException("Invalid StorPool config. Missing SP_API_HTTP_HOST");
}
if (conn.getAuthToken() == null) {
throw new CloudRuntimeException("Invalid StorPool config. Missing SP_AUTH_TOKEN");
}
try (CloseableHttpClient httpclient = HttpClientBuilder.create().build()) {
final String qry = String.format("http://%s/ctrl/1.0/%s", conn.getHostPort(), query);
final URI uri = new URI(qry);
req.setURI(uri);
req.addHeader("Authorization", String.format("Storpool v1:%s", conn.getAuthToken()));
final HttpResponse resp = httpclient.execute(req);
Gson gson = new Gson();
BufferedReader br = new BufferedReader(new InputStreamReader(resp.getEntity().getContent()));
JsonElement el = new JsonParser().parse(br);
SpApiResponse apiResp = gson.fromJson(el, SpApiResponse.class);
apiResp.fullJson = el;
return apiResp;
} catch (IOException | URISyntaxException ex) {
throw new CloudRuntimeException(ex.getMessage());
}
}
private static SpApiResponse GET(String query, SpConnectionDesc conn) {
return spApiRequest(new HttpGet(), query, conn);
}
private static SpApiResponse POST(String query, Object json, SpConnectionDesc conn) {
HttpPost req = new HttpPost();
if (json != null) {
Gson gson = new Gson();
String js = gson.toJson(json);
StringEntity input = new StringEntity(js, ContentType.APPLICATION_JSON);
log.info("Request:" + js);
req.setEntity(input);
}
return spApiRequest(req, query, conn);
}
public static boolean templateExists(SpConnectionDesc conn) {
SpApiResponse resp = GET("VolumeTemplateDescribe/" + conn.getTemplateName(), conn);
return resp.getError() == null ? true : objectExists(resp.getError());
}
public static boolean snapshotExists(final String name, SpConnectionDesc conn) {
SpApiResponse resp = GET("MultiCluster/Snapshot/" + name, conn);
return resp.getError() == null ? true : objectExists(resp.getError());
}
public static JsonArray snapshotsList(SpConnectionDesc conn) {
SpApiResponse resp = GET("MultiCluster/SnapshotsList", conn);
JsonObject obj = resp.fullJson.getAsJsonObject();
JsonArray data = obj.getAsJsonArray("data");
return data;
}
public static JsonArray volumesList(SpConnectionDesc conn) {
SpApiResponse resp = GET("MultiCluster/VolumesList", conn);
JsonObject obj = resp.fullJson.getAsJsonObject();
JsonArray data = obj.getAsJsonArray("data");
return data;
}
private static boolean objectExists(SpApiError err) {
if (!err.getName().equals("objectDoesNotExist")) {
throw new CloudRuntimeException(err.getDescr());
}
return false;
}
public static Long snapshotSize(final String name, SpConnectionDesc conn) {
SpApiResponse resp = GET("MultiCluster/Snapshot/" + name, conn);
JsonObject obj = resp.fullJson.getAsJsonObject();
if (resp.getError() != null && !objectExists(resp.getError())) {
return null;
}
JsonObject data = obj.getAsJsonArray("data").get(0).getAsJsonObject();
return data.getAsJsonPrimitive("size").getAsLong();
}
public static String getSnapshotClusterID(String name, SpConnectionDesc conn) {
SpApiResponse resp = GET("MultiCluster/Snapshot/" + name, conn);
JsonObject obj = resp.fullJson.getAsJsonObject();
JsonObject data = obj.getAsJsonArray("data").get(0).getAsJsonObject();
JsonPrimitive clusterId = data.getAsJsonPrimitive("clusterId");
return clusterId != null ? clusterId.getAsString() : null;
}
public static String getVolumeClusterID(String name, SpConnectionDesc conn) {
SpApiResponse resp = GET("MultiCluster/Volume/" + name, conn);
JsonObject obj = resp.fullJson.getAsJsonObject();
JsonObject data = obj.getAsJsonArray("data").get(0).getAsJsonObject();
JsonPrimitive clusterId = data.getAsJsonPrimitive("clusterId");
return clusterId != null ? clusterId.getAsString() : null;
}
public static SpApiResponse volumeCreate(final String name, final String parentName, final Long size, String vmUuid,
String vcPolicy, String csTag, Long iops, SpConnectionDesc conn) {
Map<String, Object> json = new LinkedHashMap<>();
json.put("name", "");
json.put("iops", iops);
json.put("parent", parentName);
json.put("size", size);
json.put("template", conn.getTemplateName());
Map<String, String> tags = StorPoolHelper.addStorPoolTags(name, vmUuid, csTag, vcPolicy);
json.put("tags", tags);
return POST("MultiCluster/VolumeCreate", json, conn);
}
public static SpApiResponse volumeCreate(SpConnectionDesc conn) {
Map<String, Object> json = new LinkedHashMap<>();
json.put("name", "");
json.put("size", 512);
json.put("template", conn.getTemplateName());
Map<String, String> tags = new HashMap<>();
tags.put("cs", "check-volume-is-on-host");
json.put("tags", tags);
return POST("MultiCluster/VolumeCreate", json, conn);
}
public static SpApiResponse volumeCopy(final String name, final String baseOn, String csTag, Long iops,
SpConnectionDesc conn) {
Map<String, Object> json = new HashMap<>();
json.put("baseOn", baseOn);
if (iops != null) {
json.put("iops", iops);
}
json.put("template", conn.getTemplateName());
Map<String, String> tags = StorPoolHelper.addStorPoolTags(name, null, csTag, null);
json.put("tags", tags);
return POST("MultiCluster/VolumeCreate", json, conn);
}
public static SpApiResponse volumeUpdateRename(final String name, String newName, String uuid,
SpConnectionDesc conn) {
Map<String, Object> json = new HashMap<>();
json.put("rename", newName);
Map<String, String> tags = new HashMap<>();
tags.put("uuid", uuid);
json.put("tags", tags);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
public static SpApiResponse volumeUpdate(final String name, final Long newSize, final Boolean shrinkOk, Long iops,
SpConnectionDesc conn) {
Map<String, Object> json = new HashMap<>();
json.put("iops", iops);
json.put("size", newSize);
json.put("shrinkOk", shrinkOk);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
public static SpApiResponse volumeUpadateTags(final String name, final String uuid, Long iops,
SpConnectionDesc conn, String vcPolicy) {
Map<String, Object> json = new HashMap<>();
Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, uuid, null, vcPolicy);
json.put("iops", iops);
json.put("tags", tags);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
public static SpApiResponse volumeUpadateCvmTags(final String name, final String uuid, SpConnectionDesc conn) {
Map<String, Object> json = new HashMap<>();
Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, uuid, null, null);
json.put("tags", tags);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
public static SpApiResponse volumeUpadateVCTags(final String name, SpConnectionDesc conn, String vcPolicy) {
Map<String, Object> json = new HashMap<>();
Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, null, null, vcPolicy);
json.put("tags", tags);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
public static SpApiResponse volumeSnapshot(final String volumeName, final String snapshotName, String vmUuid,
String csTag, String vcPolicy, SpConnectionDesc conn) {
Map<String, Object> json = new HashMap<>();
Map<String, String> tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, vcPolicy);
json.put("name", "");
json.put("tags", tags);
return POST("MultiCluster/VolumeSnapshot/" + volumeName, json, conn);
}
public static SpApiResponse volumesGroupSnapshot(final List<VolumeObjectTO> volumeTOs, final String vmUuid,
final String snapshotName, String csTag, SpConnectionDesc conn) {
Map<String, Object> json = new LinkedHashMap<>();
Map<String, String> tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, null);
List<Map<String, Object>> volumes = new ArrayList<>();
for (VolumeObjectTO volumeTO : volumeTOs) {
Map<String, Object> vol = new LinkedHashMap<>();
String name = StorPoolStorageAdaptor.getVolumeNameFromPath(volumeTO.getPath(), true);
vol.put("name", "");
vol.put("volume", name);
volumes.add(vol);
}
json.put("tags", tags);
json.put("volumes", volumes);
log.info("json:" + json);
return POST("MultiCluster/VolumesGroupSnapshot", json, conn);
}
public static SpApiResponse volumeRevert(final String name, final String snapshotName, SpConnectionDesc conn) {
Map<String, Object> json = new HashMap<>();
json.put("toSnapshot", snapshotName);
return POST("MultiCluster/VolumeRevert/" + name, json, conn);
}
public static SpApiResponse volumeFreeze(final String volumeName, SpConnectionDesc conn) {
return POST("MultiCluster/VolumeFreeze/" + volumeName, null, conn);
}
public static SpApiResponse volumeAcquire(final String volumeName, SpConnectionDesc conn) {
Map<String, Object> json = new HashMap<>();
json.put("onRemoteAttached", "detachForce");
return POST("MultiCluster/VolumeAcquire/" + volumeName, json, conn);
}
public static SpApiResponse volumeDelete(final String name, SpConnectionDesc conn) {
Map<String, Object> json = new HashMap<>();
json.put("onAttached", "detachForce");
return POST("MultiCluster/VolumeDelete/" + name, json, conn);
}
public static SpApiResponse snapshotDelete(final String name, SpConnectionDesc conn) {
SpApiResponse resp = detachAllForced(name, true, conn);
return resp.getError() == null ? POST("MultiCluster/SnapshotDelete/" + name, null, conn) : resp;
}
public static SpApiResponse detachAllForced(final String name, final boolean snapshot, SpConnectionDesc conn) {
final String type = snapshot ? "snapshot" : "volume";
List<Map<String, Object>> json = new ArrayList<>();
Map<String, Object> reassignDesc = new HashMap<>();
reassignDesc.put(type, name);
reassignDesc.put("detach", "all");
reassignDesc.put("force", true);
json.add(reassignDesc);
return POST("MultiCluster/VolumesReassign", json, conn);
}
public static String getSnapshotNameFromResponse(SpApiResponse resp, boolean tildeNeeded, String globalIdOrRemote) {
JsonObject obj = resp.fullJson.getAsJsonObject();
JsonPrimitive data = obj.getAsJsonObject("data").getAsJsonPrimitive(globalIdOrRemote);
String name = data != null ? data.getAsString() : null;
if (name != null && tildeNeeded) {
name = "~" + name;
}
return name;
}
public static String getNameFromResponse(SpApiResponse resp, boolean tildeNeeded) {
JsonObject obj = resp.fullJson.getAsJsonObject();
JsonPrimitive data = obj.getAsJsonObject("data").getAsJsonPrimitive("name");
String name = data != null ? data.getAsString() : null;
name = name != null ? name.startsWith("~") && !tildeNeeded ? name.split("~")[1] : name : name;
return name;
}
}
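
The StorPool connection descriptor accepts two forms, both parsed by SpConnectionDesc(String). A short sketch, with hypothetical endpoint, token and template values:

// key=value form, as stored in the primary storage pool's uuid/url:
SpConnectionDesc conn = new SpConnectionDesc(
        "SP_API_HTTP=10.1.1.5:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=ssd");

// bare template-name form: host, port and auth token are resolved by running
// the storpool_confget CLI on the management server:
SpConnectionDesc fromLocalConf = new SpConnectionDesc("ssd");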


@ -0,0 +1,575 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.motion;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.commons.collections.MapUtils;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
import com.cloud.agent.api.ModifyTargetsAnswer;
import com.cloud.agent.api.ModifyTargetsCommand;
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageManager;
import com.cloud.storage.VMTemplateDetailVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplateDetailsDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.VMInstanceDao;
@Component
public class StorPoolDataMotionStrategy implements DataMotionStrategy {
private static final Logger log = Logger.getLogger(StorPoolDataMotionStrategy.class);
@Inject
private SnapshotDataFactory _snapshotDataFactory;
@Inject
private DataStoreManager _dataStore;
@Inject
private ConfigurationDao _configDao;
@Inject
private EndPointSelector _selector;
@Inject
private TemplateDataStoreDao _templStoreDao;
@Inject
private ClusterDao _clusterDao;
@Inject
private HostDao _hostDao;
@Inject
private SnapshotDetailsDao _snapshotDetailsDao;
@Inject
private VMTemplateDetailsDao _vmTemplateDetailsDao;
@Inject
private SnapshotDataStoreDao _snapshotStoreDao;
@Inject
private StoragePoolDetailsDao _storagePoolDetails;
@Inject
private PrimaryDataStoreDao _storagePool;
@Inject
private VolumeDao _volumeDao;
@Inject
private VolumeDataFactory _volumeDataFactory;
@Inject
private VMInstanceDao _vmDao;
@Inject
private GuestOSDao _guestOsDao;
@Inject
private VolumeService _volumeService;
@Inject
private GuestOSCategoryDao _guestOsCategoryDao;
@Inject
private SnapshotDao _snapshotDao;
@Inject
private AgentManager _agentManager;
@Inject
private PrimaryDataStoreDao _storagePoolDao;
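// Snapshot-to-template copies are claimed by canHandle() only when
// StorPoolConfigurationManager.BypassSecondaryStorage is enabled: the template volume is created
// on StorPool from the snapshot, backed up with StorPoolBackupTemplateFromSnapshotCommand,
// then frozen into a StorPool snapshot (see copyAsync below).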
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
DataObjectType srcType = srcData.getType();
DataObjectType dstType = destData.getType();
if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.TEMPLATE
&& StorPoolConfigurationManager.BypassSecondaryStorage.value()) {
SnapshotInfo sinfo = (SnapshotInfo) srcData;
VolumeInfo volume = sinfo.getBaseVolume();
StoragePoolVO storagePool = _storagePool.findById(volume.getPoolId());
if (!storagePool.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) {
return StrategyPriority.CANT_HANDLE;
}
String snapshotName = StorPoolHelper.getSnapshotName(sinfo.getId(), sinfo.getUuid(), _snapshotStoreDao,
_snapshotDetailsDao);
StorPoolUtil.spLog("StorPoolDataMotionStrategy.canHandle snapshot name=%s", snapshotName);
if (snapshotName != null) {
return StrategyPriority.HIGHEST;
}
}
return StrategyPriority.CANT_HANDLE;
}
@Override
public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
AsyncCompletionCallback<CopyCommandResult> callback) {
SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData.getTO();
TemplateObjectTO template = (TemplateObjectTO) destData.getTO();
DataStore store = _dataStore.getDataStore(snapshot.getVolume().getDataStore().getUuid(),
snapshot.getVolume().getDataStore().getRole());
SnapshotInfo sInfo = _snapshotDataFactory.getSnapshot(snapshot.getId(), store);
VolumeInfo vInfo = sInfo.getBaseVolume();
SpConnectionDesc conn = StorPoolUtil.getSpConnection(vInfo.getDataStore().getUuid(),
vInfo.getDataStore().getId(), _storagePoolDetails, _storagePool);
String name = template.getUuid();
String volumeName = "";
String parentName = StorPoolHelper.getSnapshotName(sInfo.getId(), sInfo.getUuid(), _snapshotStoreDao,
_snapshotDetailsDao);
// TODO volume tags cs - template
SpApiResponse res = StorPoolUtil.volumeCreate(name, parentName, sInfo.getSize(), null, "no", "template", null,
conn);
CopyCmdAnswer answer = null;
String err = null;
if (res.getError() != null) {
log.debug(String.format("Could not create volume from snapshot with ID=%s", snapshot.getId()));
StorPoolUtil.spLog("Volume create failed with error=%s", res.getError().getDescr());
err = res.getError().getDescr();
} else {
volumeName = StorPoolUtil.getNameFromResponse(res, true);
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(sInfo.getId(), sInfo.getUuid());
snapshot.setPath(snapshotDetails.getValue());
Command backupSnapshot = new StorPoolBackupTemplateFromSnapshotCommand(snapshot, template,
StorPoolHelper.getTimeout(StorPoolHelper.BackupSnapshotWait, _configDao),
VirtualMachineManager.ExecuteInSequence.value());
try {
Long clusterId = StorPoolHelper.findClusterIdByGlobalId(parentName, _clusterDao);
EndPoint ep2 = clusterId != null
? RemoteHostEndPoint
.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, _hostDao))
: _selector.select(sInfo, destData);
if (ep2 == null) {
err = "No remote endpoint to send command, check if host or ssvm is down?";
} else {
answer = (CopyCmdAnswer) ep2.sendMessage(backupSnapshot);
if (answer != null && answer.getResult()) {
SpApiResponse resSnapshot = StorPoolUtil.volumeFreeze(volumeName, conn);
if (resSnapshot.getError() != null) {
log.debug(String.format("Could not snapshot volume with ID=%s", snapshot.getId()));
StorPoolUtil.spLog("Volume freeze failed with error=%s", resSnapshot.getError().getDescr());
err = resSnapshot.getError().getDescr();
StorPoolUtil.volumeDelete(volumeName, conn);
} else {
StorPoolHelper.updateVmStoreTemplate(template.getId(), template.getDataStore().getRole(),
StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(res, false)), _templStoreDao);
}
} else {
err = "Could not copy template to secondary " + answer.getResult();
StorPoolUtil.volumeDelete(StorPoolUtil.getNameFromResponse(res, true), conn);
}
}
} catch (CloudRuntimeException e) {
err = e.getMessage();
}
}
_vmTemplateDetailsDao.persist(new VMTemplateDetailVO(template.getId(), StorPoolUtil.SP_STORAGE_POOL_ID,
String.valueOf(vInfo.getDataStore().getId()), false));
StorPoolUtil.spLog("StorPoolDataMotionStrategy.copyAsync Creating snapshot=%s for StorPool template=%s",
volumeName, conn.getTemplateName());
final CopyCommandResult cmd = new CopyCommandResult(null, answer);
cmd.setResult(err);
callback.complete(cmd);
}
@Override
public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
return canHandleLiveMigrationOnStorPool(volumeMap, srcHost, destHost);
}
final StrategyPriority canHandleLiveMigrationOnStorPool(Map<VolumeInfo, DataStore> volumeMap, Host srcHost,
Host destHost) {
if (srcHost.getId() != destHost.getId() && isDestinationStorPoolPrimaryStorage(volumeMap)) {
return StrategyPriority.HIGHEST;
}
return StrategyPriority.CANT_HANDLE;
}
private boolean isDestinationStorPoolPrimaryStorage(Map<VolumeInfo, DataStore> volumeMap) {
if (MapUtils.isNotEmpty(volumeMap)) {
for (DataStore dataStore : volumeMap.values()) {
StoragePoolVO storagePoolVO = _storagePool.findById(dataStore.getId());
if (storagePoolVO == null
|| !storagePoolVO.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) {
return false;
}
}
} else {
return false;
}
return true;
}
@Override
public void copyAsync(Map<VolumeInfo, DataStore> volumeDataStoreMap, VirtualMachineTO vmTO, Host srcHost,
Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
String errMsg = null;
String newVolName = null;
SpConnectionDesc conn = null;
try {
if (srcHost.getHypervisorType() != HypervisorType.KVM) {
throw new CloudRuntimeException(String.format("Invalid hypervisor type [%s]. Only KVM supported", srcHost.getHypervisorType()));
}
VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId());
vmTO.setState(vmInstance.getState());
List<MigrateDiskInfo> migrateDiskInfoList = new ArrayList<MigrateDiskInfo>();
Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage = new HashMap<>();
Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo = new HashMap<>();
for (Map.Entry<VolumeInfo, DataStore> entry : volumeDataStoreMap.entrySet()) {
VolumeInfo srcVolumeInfo = entry.getKey();
DataStore destDataStore = entry.getValue();
VolumeVO srcVolume = _volumeDao.findById(srcVolumeInfo.getId());
StoragePoolVO destStoragePool = _storagePool.findById(destDataStore.getId());
VolumeVO destVolume = duplicateVolumeOnAnotherStorage(srcVolume, destStoragePool);
VolumeInfo destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
destVolumeInfo.processEvent(Event.MigrationCopyRequested);
destVolumeInfo.processEvent(Event.MigrationCopySucceeded);
destVolumeInfo.processEvent(Event.MigrationRequested);
conn = StorPoolUtil.getSpConnection(destDataStore.getUuid(), destDataStore.getId(), _storagePoolDetails,
_storagePool);
SpApiResponse resp = StorPoolUtil.volumeCreate(srcVolume.getUuid(), null, srcVolume.getSize(),
vmTO.getUuid(), null, "volume", srcVolume.getMaxIops(), conn);
if (resp.getError() == null) {
newVolName = StorPoolUtil.getNameFromResponse(resp, true);
}
String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
destVolume.setPath(StorPoolUtil.devPath(volumeName));
_volumeDao.update(destVolume.getId(), destVolume);
destVolume = _volumeDao.findById(destVolume.getId());
destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
String destPath = generateDestPath(destHost, destStoragePool, destVolumeInfo);
MigrateCommand.MigrateDiskInfo migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath);
migrateDiskInfoList.add(migrateDiskInfo);
migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo);
srcVolumeInfoToDestVolumeInfo.put(srcVolumeInfo, destVolumeInfo);
}
PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
try {
Answer pfma = _agentManager.send(destHost.getId(), pfmc);
if (pfma == null || !pfma.getResult()) {
String details = pfma != null ? pfma.getDetails() : "null answer returned";
errMsg = String.format("Unable to prepare for migration due to the following: %s", details);
throw new AgentUnavailableException(errMsg, destHost.getId());
}
} catch (final OperationTimedoutException e) {
errMsg = String.format("Operation timed out due to %s", e.getMessage());
throw new AgentUnavailableException(errMsg, destHost.getId());
}
VMInstanceVO vm = _vmDao.findById(vmTO.getId());
boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId())
.getName().equalsIgnoreCase("Windows");
MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(),
destHost.getPrivateIpAddress(), isWindows, vmTO, true);
migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value());
migrateCommand.setMigrateStorage(migrateStorage);
migrateCommand.setMigrateStorageManaged(true);
migrateCommand.setMigrateDiskInfoList(migrateDiskInfoList);
boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
migrateCommand.setAutoConvergence(kvmAutoConvergence);
MigrateAnswer migrateAnswer = (MigrateAnswer) _agentManager.send(srcHost.getId(), migrateCommand);
boolean success = migrateAnswer != null && migrateAnswer.getResult();
handlePostMigration(success, srcVolumeInfoToDestVolumeInfo, vmTO, destHost);
if (migrateAnswer == null) {
throw new CloudRuntimeException("Unable to get an answer to the migrate command");
}
if (!migrateAnswer.getResult()) {
errMsg = migrateAnswer.getDetails();
throw new CloudRuntimeException(errMsg);
}
} catch (AgentUnavailableException | OperationTimedoutException | CloudRuntimeException ex) {
errMsg = String.format(
"Copying volume(s) of VM [%s] from host [%s] to host [%s] failed in StorPoolDataMotionStrategy.copyAsync. Error message: [%s].",
vmTO.getId(), srcHost.getId(), destHost.getId(), ex.getMessage());
log.error(errMsg, ex);
throw new CloudRuntimeException(errMsg);
} finally {
if (errMsg != null) {
deleteVolumeOnFail(newVolName, conn);
}
CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg);
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
result.setResult(errMsg);
callback.complete(result);
}
}
private void deleteVolumeOnFail(String newVolName, SpConnectionDesc conn) {
if (newVolName != null && conn != null) {
StorPoolUtil.volumeDelete(newVolName, conn);
}
}
private VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePoolVO storagePoolVO) {
Long lastPoolId = volume.getPoolId();
VolumeVO newVol = new VolumeVO(volume);
newVol.setInstanceId(null);
newVol.setChainInfo(null);
newVol.setPath(null);
newVol.setFolder(null);
newVol.setPodId(storagePoolVO.getPodId());
newVol.setPoolId(storagePoolVO.getId());
newVol.setLastPoolId(lastPoolId);
return _volumeDao.persist(newVol);
}
private void handlePostMigration(boolean success, Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo,
VirtualMachineTO vmTO, Host destHost) {
if (!success) {
try {
PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
pfmc.setRollback(true);
Answer pfma = _agentManager.send(destHost.getId(), pfmc);
if (pfma == null || !pfma.getResult()) {
String details = pfma != null ? pfma.getDetails() : "null answer returned";
String msg = "Unable to rollback prepare for migration due to the following: " + details;
throw new AgentUnavailableException(msg, destHost.getId());
}
} catch (Exception e) {
log.debug("Failed to disconnect one or more (original) dest volumes", e);
}
}
for (Map.Entry<VolumeInfo, VolumeInfo> entry : srcVolumeInfoToDestVolumeInfo.entrySet()) {
VolumeInfo srcVolumeInfo = entry.getKey();
VolumeInfo destVolumeInfo = entry.getValue();
if (success) {
srcVolumeInfo.processEvent(Event.OperationSuccessed);
destVolumeInfo.processEvent(Event.OperationSuccessed);
_volumeDao.updateUuid(srcVolumeInfo.getId(), destVolumeInfo.getId());
VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
volumeVO.setFormat(ImageFormat.QCOW2);
_volumeDao.update(volumeVO.getId(), volumeVO);
try {
_volumeService.destroyVolume(srcVolumeInfo.getId());
srcVolumeInfo = _volumeDataFactory.getVolume(srcVolumeInfo.getId());
AsyncCallFuture<VolumeApiResult> destroyFuture = _volumeService.expungeVolumeAsync(srcVolumeInfo);
if (destroyFuture.get().isFailed()) {
log.debug("Failed to clean up source volume on storage");
}
} catch (Exception e) {
log.debug("Failed to clean up source volume on storage", e);
}
// Update the volume ID for snapshots on secondary storage
if (!_snapshotDao.listByVolumeId(srcVolumeInfo.getId()).isEmpty()) {
_snapshotDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId());
_snapshotStoreDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId());
}
} else {
try {
disconnectHostFromVolume(destHost, destVolumeInfo.getPoolId(), destVolumeInfo.getPath());
} catch (Exception e) {
log.debug("Failed to disconnect (new) dest volume", e);
}
try {
_volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore());
} catch (Exception e) {
log.debug("Failed to revoke access from dest volume", e);
}
destVolumeInfo.processEvent(Event.OperationFailed);
srcVolumeInfo.processEvent(Event.OperationFailed);
try {
_volumeService.destroyVolume(destVolumeInfo.getId());
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId());
AsyncCallFuture<VolumeApiResult> destroyFuture = _volumeService.expungeVolumeAsync(destVolumeInfo);
if (destroyFuture.get().isFailed()) {
log.debug("Failed to clean up dest volume on storage");
}
} catch (Exception e) {
log.debug("Failed to clean up dest volume on storage", e);
}
}
}
}
private String generateDestPath(Host destHost, StoragePoolVO destStoragePool, VolumeInfo destVolumeInfo) {
return connectHostToVolume(destHost, destVolumeInfo.getPoolId(), destVolumeInfo.getPath());
}
private String connectHostToVolume(Host host, long storagePoolId, String iqn) {
ModifyTargetsCommand modifyTargetsCommand = getModifyTargetsCommand(storagePoolId, iqn, true);
return sendModifyTargetsCommand(modifyTargetsCommand, host.getId()).get(0);
}
private void disconnectHostFromVolume(Host host, long storagePoolId, String iqn) {
ModifyTargetsCommand modifyTargetsCommand = getModifyTargetsCommand(storagePoolId, iqn, false);
sendModifyTargetsCommand(modifyTargetsCommand, host.getId());
}
private ModifyTargetsCommand getModifyTargetsCommand(long storagePoolId, String iqn, boolean add) {
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
Map<String, String> details = new HashMap<>();
details.put(ModifyTargetsCommand.IQN, iqn);
details.put(ModifyTargetsCommand.STORAGE_TYPE, storagePool.getPoolType().name());
details.put(ModifyTargetsCommand.STORAGE_UUID, storagePool.getUuid());
details.put(ModifyTargetsCommand.STORAGE_HOST, storagePool.getHostAddress());
details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
List<Map<String, String>> targets = new ArrayList<>();
targets.add(details);
cmd.setTargets(targets);
cmd.setApplyToAllHostsInCluster(true);
cmd.setAdd(add);
cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);
return cmd;
}
private List<String> sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
ModifyTargetsAnswer modifyTargetsAnswer = (ModifyTargetsAnswer) _agentManager.easySend(hostId, cmd);
if (modifyTargetsAnswer == null) {
throw new CloudRuntimeException("Unable to get an answer to the modify targets command");
}
if (!modifyTargetsAnswer.getResult()) {
String msg = "Unable to modify targets on the following host: " + hostId;
throw new CloudRuntimeException(msg);
}
return modifyTargetsAnswer.getConnectedPaths();
}
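// Note: the StorPool volume is presented to the hypervisor as a raw block
// device, hence DiskType.BLOCK, DriverType.RAW and Source.DEV below.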
protected MigrateCommand.MigrateDiskInfo configureMigrateDiskInfo(VolumeInfo srcVolumeInfo, String destPath) {
return new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(),
MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, MigrateCommand.MigrateDiskInfo.DriverType.RAW,
MigrateCommand.MigrateDiskInfo.Source.DEV, destPath);
}
}


@ -0,0 +1,46 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.snapshot;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
public class StorPoolConfigurationManager implements Configurable {
public static final ConfigKey<Boolean> BypassSecondaryStorage = new ConfigKey<Boolean>(Boolean.class, "sp.bypass.secondary.storage", "Advanced", "false",
"For StorPool managed storage: bypass the backup to secondary storage", true, ConfigKey.Scope.Global, null);
public static final ConfigKey<String> StorPoolClusterId = new ConfigKey<String>(String.class, "sp.cluster.id", "Advanced", "n/a",
"For StorPool multi-cluster authorization", true, ConfigKey.Scope.Cluster, null);
public static final ConfigKey<Boolean> AlternativeEndPointEnabled = new ConfigKey<Boolean>(Boolean.class, "sp.enable.alternative.endpoint", "Advanced", "false",
"Used for StorPool primary storage; defines whether the alternative endpoint should be used", true, ConfigKey.Scope.StoragePool, null);
public static final ConfigKey<String> AlternativeEndpoint = new ConfigKey<String>(String.class, "sp.alternative.endpoint", "Advanced", "",
"Used for StorPool primary storage as an alternative endpoint. The endpoint's structure is: SP_API_HTTP=address:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=template_name", true, ConfigKey.Scope.StoragePool, null);
@Override
public String getConfigComponentName() {
return StorPoolConfigurationManager.class.getSimpleName();
}
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint };
}
}
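The sp.alternative.endpoint value is a single string in the SP_API_HTTP=address:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=template_name layout documented above. A minimal sketch of splitting such a string into its parts (illustrative only; the plugin's actual parsing is done elsewhere, e.g. in StorPoolUtil):

import java.util.HashMap;
import java.util.Map;

public class AlternativeEndpointParseSketch {
    // Split "KEY=value;KEY=value;..." into a map; the expected keys are
    // SP_API_HTTP, SP_AUTH_TOKEN and SP_TEMPLATE, per the description above.
    public static Map<String, String> parse(String endpoint) {
        Map<String, String> parts = new HashMap<>();
        for (String pair : endpoint.split(";")) {
            int eq = pair.indexOf('=');
            if (eq > 0) {
                parts.put(pair.substring(0, eq).trim(), pair.substring(eq + 1).trim());
            }
        }
        return parts;
    }
}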


@ -0,0 +1,289 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.snapshot;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Snapshot;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.fsm.NoTransitionException;
@Component
public class StorPoolSnapshotStrategy implements SnapshotStrategy {
private static final Logger log = Logger.getLogger(StorPoolSnapshotStrategy.class);
@Inject
private SnapshotDao _snapshotDao;
@Inject
private PrimaryDataStoreDao _primaryDataStoreDao;
@Inject
private VolumeDao _volumeDao;
@Inject
private SnapshotDataStoreDao _snapshotStoreDao;
@Inject
private SnapshotDetailsDao _snapshotDetailsDao;
@Inject
private SnapshotService snapshotSvr;
@Inject
private SnapshotDataFactory snapshotDataFactory;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
@Override
public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) {
SnapshotObject snapshotObj = (SnapshotObject) snapshotInfo;
try {
snapshotObj.processEvent(Snapshot.Event.BackupToSecondary);
snapshotObj.processEvent(Snapshot.Event.OperationSucceeded);
} catch (NoTransitionException ex) {
StorPoolUtil.spLog("Failed to change state: " + ex.toString());
try {
snapshotObj.processEvent(Snapshot.Event.OperationFailed);
} catch (NoTransitionException ex2) {
StorPoolUtil.spLog("Failed to change state: " + ex2.toString());
}
}
return snapshotInfo;
}
@Override
public boolean deleteSnapshot(Long snapshotId) {
final SnapshotVO snapshotVO = _snapshotDao.findById(snapshotId);
VolumeVO volume = _volumeDao.findByIdIncludingRemoved(snapshotVO.getVolumeId());
String name = StorPoolHelper.getSnapshotName(snapshotId, snapshotVO.getUuid(), _snapshotStoreDao, _snapshotDetailsDao);
boolean res = false;
// clean up the snapshot from StorPool storage pools
StoragePoolVO storage = _primaryDataStoreDao.findById(volume.getPoolId());
if (storage.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) {
try {
SpConnectionDesc conn = StorPoolUtil.getSpConnection(storage.getUuid(), storage.getId(), storagePoolDetailsDao, _primaryDataStoreDao);
SpApiResponse resp = StorPoolUtil.snapshotDelete(name, conn);
if (resp.getError() != null) {
final String err = String.format("Failed to clean-up Storpool snapshot %s. Error: %s", name, resp.getError());
StorPoolUtil.spLog(err);
} else {
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshotId, snapshotVO.getUuid());
if (snapshotDetails != null) {
_snapshotDetailsDao.removeDetails(snapshotId);
}
res = deleteSnapshotFromDb(snapshotId);
StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfuly=%s, snapshot uuid=%s, name=%s", res, snapshotVO.getUuid(), name);
}
} catch (Exception e) {
String errMsg = String.format("Cannot delete snapshot due to %s", e.getMessage());
throw new CloudRuntimeException(errMsg);
}
}
return res;
}
@Override
public StrategyPriority canHandle(Snapshot snapshot, SnapshotOperation op) {
StorPoolUtil.spLog("StorpoolSnapshotStrategy.canHandle: snapshot=%s, uuid=%s, op=%s", snapshot.getName(), snapshot.getUuid(), op);
if (op != SnapshotOperation.DELETE) {
return StrategyPriority.CANT_HANDLE;
}
String name = StorPoolHelper.getSnapshotName(snapshot.getId(), snapshot.getUuid(), _snapshotStoreDao, _snapshotDetailsDao);
if (name != null) {
StorPoolUtil.spLog("StorpoolSnapshotStrategy.canHandle: globalId=%s", name);
return StrategyPriority.HIGHEST;
}
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), snapshot.getUuid());
if (snapshotDetails != null) {
_snapshotDetailsDao.remove(snapshotDetails.getId());
}
return StrategyPriority.CANT_HANDLE;
}
private boolean deleteSnapshotChain(SnapshotInfo snapshot) {
log.debug("delete snapshot chain for snapshot: " + snapshot.getId());
boolean result = false;
boolean resultIsSet = false;
try {
while (snapshot != null &&
(snapshot.getState() == Snapshot.State.Destroying || snapshot.getState() == Snapshot.State.Destroyed || snapshot.getState() == Snapshot.State.Error)) {
SnapshotInfo child = snapshot.getChild();
if (child != null) {
log.debug("the snapshot has child, can't delete it on the storage");
break;
}
log.debug("Snapshot: " + snapshot.getId() + " doesn't have children, so it's ok to delete it and its parents");
SnapshotInfo parent = snapshot.getParent();
boolean deleted = false;
if (parent != null) {
if (parent.getPath() != null && parent.getPath().equalsIgnoreCase(snapshot.getPath())) {
log.debug("for empty delta snapshot, only mark it as destroyed in db");
snapshot.processEvent(Event.DestroyRequested);
snapshot.processEvent(Event.OperationSuccessed);
deleted = true;
if (!resultIsSet) {
result = true;
resultIsSet = true;
}
}
}
if (!deleted) {
SnapshotInfo snap = snapshotDataFactory.getSnapshot(snapshot.getId(), DataStoreRole.Image);
if (StorPoolStorageAdaptor.getVolumeNameFromPath(snap.getPath(), true) == null) {
try {
boolean r = snapshotSvr.deleteSnapshot(snapshot);
if (r) {
List<SnapshotInfo> cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId());
for (SnapshotInfo cacheSnap : cacheSnaps) {
log.debug("Delete snapshot " + snapshot.getId() + " from image cache store: " + cacheSnap.getDataStore().getName());
cacheSnap.delete();
}
}
if (!resultIsSet) {
result = r;
resultIsSet = true;
}
} catch (Exception e) {
log.debug("Failed to delete snapshot on storage. ", e);
}
}
} else {
result = true;
}
snapshot = parent;
}
} catch (Exception e) {
log.debug("delete snapshot failed: ", e);
}
return result;
}
private boolean deleteSnapshotFromDb(Long snapshotId) {
SnapshotVO snapshotVO = _snapshotDao.findById(snapshotId);
if (snapshotVO.getState() == Snapshot.State.Allocated) {
_snapshotDao.remove(snapshotId);
return true;
}
if (snapshotVO.getState() == Snapshot.State.Destroyed) {
return true;
}
if (Snapshot.State.Error.equals(snapshotVO.getState())) {
List<SnapshotDataStoreVO> storeRefs = _snapshotStoreDao.findBySnapshotId(snapshotId);
for (SnapshotDataStoreVO ref : storeRefs) {
_snapshotStoreDao.expunge(ref.getId());
}
_snapshotDao.remove(snapshotId);
return true;
}
if (snapshotVO.getState() == Snapshot.State.CreatedOnPrimary) {
snapshotVO.setState(Snapshot.State.Destroyed);
_snapshotDao.update(snapshotId, snapshotVO);
return true;
}
if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Error.equals(snapshotVO.getState()) &&
!Snapshot.State.Destroying.equals(snapshotVO.getState())) {
throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is in " + snapshotVO.getState() + " Status");
}
SnapshotInfo snapshotOnImage = snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Image);
if (snapshotOnImage == null) {
log.debug("Can't find snapshot on backup storage, delete it in db");
_snapshotDao.remove(snapshotId);
return true;
}
SnapshotObject obj = (SnapshotObject)snapshotOnImage;
try {
obj.processEvent(Snapshot.Event.DestroyRequested);
} catch (NoTransitionException e) {
log.debug("Failed to set the state to destroying: ", e);
return false;
}
try {
boolean result = deleteSnapshotChain(snapshotOnImage);
obj.processEvent(Snapshot.Event.OperationSucceeded);
if (result) {
SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Primary);
if (snapshotOnPrimary != null) {
snapshotOnPrimary.setState(State.Destroyed);
_snapshotStoreDao.update(snapshotOnPrimary.getId(), snapshotOnPrimary);
}
}
} catch (Exception e) {
log.debug("Failed to delete snapshot: ", e);
try {
obj.processEvent(Snapshot.Event.OperationFailed);
} catch (NoTransitionException e1) {
log.debug("Failed to change snapshot state: " + e.toString());
}
return false;
}
return true;
}
@Override
public SnapshotInfo takeSnapshot(SnapshotInfo snapshot) {
return null;
}
@Override
public boolean revertSnapshot(SnapshotInfo snapshot) {
return false;
}
@Override
public void postSnapshotCreation(SnapshotInfo snapshot) {
}
}


@ -0,0 +1,387 @@
//
//Licensed to the Apache Software Foundation (ASF) under one
//or more contributor license agreements. See the NOTICE file
//distributed with this work for additional information
//regarding copyright ownership. The ASF licenses this file
//to you under the Apache License, Version 2.0 (the
//"License"); you may not use this file except in compliance
//with the License. You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing,
//software distributed under the License is distributed on an
//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
//KIND, either express or implied. See the License for the
//specific language governing permissions and limitations
//under the License.
//
package org.apache.cloudstack.storage.snapshot;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.vmsnapshot.DefaultVMSnapshotStrategy;
import org.apache.cloudstack.storage.vmsnapshot.VMSnapshotHelper;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.api.VMSnapshotTO;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.uservm.UserVm;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.vm.UserVmVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.snapshot.VMSnapshot;
import com.cloud.vm.snapshot.VMSnapshotDetailsVO;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
@Component
public class StorPoolVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
private static final Logger log = Logger.getLogger(StorPoolVMSnapshotStrategy.class);
@Inject
private VMSnapshotHelper vmSnapshotHelper;
@Inject
private UserVmDao userVmDao;
@Inject
private VMSnapshotDao vmSnapshotDao;
@Inject
private VolumeDao volumeDao;
@Inject
private DiskOfferingDao diskOfferingDao;
@Inject
private PrimaryDataStoreDao storagePool;
@Inject
private VMSnapshotDetailsDao vmSnapshotDetailsDao;
@Inject
private VolumeDataFactory volFactory;
@Inject
private VolumeDetailsDao volumeDetailsDao;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject
private DataStoreManager dataStoreManager;
int _wait;
@Override
public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) {
log.info("KVMVMSnapshotStrategy take snapshot");
UserVm userVm = userVmDao.findById(vmSnapshot.getVmId());
VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested);
} catch (NoTransitionException e) {
throw new CloudRuntimeException("No transiontion " + e.getMessage());
}
boolean result = false;
try {
List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
DataStore dataStore = dataStoreManager.getPrimaryDataStore(volumeTOs.get(0).getDataStore().getUuid());
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, storagePool);
long prev_chain_size = 0;
long virtual_size = 0;
for (VolumeObjectTO volume : volumeTOs) {
virtual_size += volume.getSize();
VolumeVO volumeVO = volumeDao.findById(volume.getId());
prev_chain_size += volumeVO.getVmSnapshotChainSize() == null ? 0 : volumeVO.getVmSnapshotChainSize();
}
VMSnapshotTO current = null;
VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId());
if (currentSnapshot != null) {
current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot);
}
if (current == null) {
vmSnapshotVO.setParent(null);
} else {
vmSnapshotVO.setParent(current.getId());
}
SpApiResponse resp = StorPoolUtil.volumesGroupSnapshot(volumeTOs, userVm.getUuid(), vmSnapshotVO.getUuid(), "group", conn);
JsonObject obj = resp.fullJson.getAsJsonObject();
JsonArray snapshots = obj.getAsJsonObject("data").getAsJsonArray("snapshots");
StorPoolUtil.spLog("Volumes=%s attached to virtual machine", volumeTOs.toString());
for (VolumeObjectTO vol : volumeTOs) {
for (JsonElement jsonElement : snapshots) {
JsonObject snapshotObject = jsonElement.getAsJsonObject();
String snapshot = StorPoolUtil
.devPath(snapshotObject.getAsJsonPrimitive(StorPoolUtil.GLOBAL_ID).getAsString());
if (snapshotObject.getAsJsonPrimitive("volume").getAsString().equals(StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true))
|| snapshotObject.getAsJsonPrimitive("volumeGlobalId").getAsString().equals(StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), false))) {
VMSnapshotDetailsVO vmSnapshotDetailsVO = new VMSnapshotDetailsVO(vmSnapshot.getId(), vol.getUuid(), snapshot, false);
vmSnapshotDetailsDao.persist(vmSnapshotDetailsVO);
Long poolId = volumeDao.findById(vol.getId()).getPoolId();
if (poolId != null) {
VMSnapshotDetailsVO vmSnapshotDetailStoragePoolId = new VMSnapshotDetailsVO(
vmSnapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(poolId), false);
vmSnapshotDetailsDao.persist(vmSnapshotDetailStoragePoolId);
}
StorPoolUtil.spLog("Snapshot=%s of volume=%s for a group snapshot=%s.", snapshot, vol.getUuid(), vmSnapshot.getUuid());
}
}
}
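// For orientation, the group-snapshot response parsed above has roughly this
// shape (field names as referenced by this code; the exact JSON key behind
// StorPoolUtil.GLOBAL_ID is defined in StorPoolUtil):
// { "data": { "snapshots": [
//     { "volume": "<name>", "volumeGlobalId": "<globalId>", <GLOBAL_ID>: "<globalId>" },
//     ... ] } }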
if (resp.getError() == null) {
StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.takeSnapshot answer=%s", resp.getError());
finalizeCreate(vmSnapshotVO, volumeTOs);
result = vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded);
long new_chain_size = 0;
for (VolumeObjectTO volumeObjectTO : volumeTOs) {
publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeObjectTO);
new_chain_size += volumeObjectTO.getSize();
log.info("EventTypes.EVENT_VM_SNAPSHOT_CREATE publishUsageEvent" + volumeObjectTO);
}
publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm, new_chain_size - prev_chain_size, virtual_size);
} else {
throw new CloudRuntimeException("Could not create vm snapshot");
}
return vmSnapshot;
} catch (Exception e) {
log.debug("Could not create VM snapshot:" + e.getMessage());
throw new CloudRuntimeException("Could not create VM snapshot:" + e.getMessage());
} finally {
if (!result) {
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
log.info(String.format("VMSnapshot.Event.OperationFailed vmSnapshot=%s", vmSnapshot));
} catch (NoTransitionException nte) {
log.error("Cannot set vm state:" + nte.getMessage());
}
}
}
}
@Override
public StrategyPriority canHandle(VMSnapshot vmSnapshot) {
return areAllVolumesOnStorPool(vmSnapshot.getVmId());
}
public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) {
if (snapshotMemory) {
return StrategyPriority.CANT_HANDLE;
}
return areAllVolumesOnStorPool(vmId);
}
private StrategyPriority areAllVolumesOnStorPool(Long vmId) {
List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(vmId);
if (volumeTOs == null || volumeTOs.isEmpty()) {
return StrategyPriority.CANT_HANDLE;
}
for (VolumeObjectTO volumeTO : volumeTOs) {
Long poolId = volumeTO.getPoolId();
StoragePoolVO pool = storagePool.findById(poolId);
if (!pool.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) {
return StrategyPriority.CANT_HANDLE;
}
}
return StrategyPriority.HIGHEST;
}
@Override
public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) {
UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId());
VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested);
} catch (NoTransitionException e) {
log.debug("Failed to change vm snapshot state with event ExpungeRequested");
throw new CloudRuntimeException(
"Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage());
}
List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId());
DataStore dataStore = dataStoreManager.getPrimaryDataStore(volumeTOs.get(0).getDataStore().getUuid());
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, storagePool);
SpApiResponse resp = null;
for (VolumeObjectTO volumeObjectTO : volumeTOs) {
String err = null;
VMSnapshotDetailsVO snapshotDetailsVO = vmSnapshotDetailsDao.findDetail(vmSnapshot.getId(), volumeObjectTO.getUuid());
String snapshotName = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDetailsVO.getValue(), true);
if (snapshotName == null) {
err = String.format("Could not find StorPool's snapshot vm snapshot uuid=%s and volume uui=%s",
vmSnapshot.getUuid(), volumeObjectTO.getUuid());
log.error("Could not delete snapshot for vm:" + err);
}
StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.deleteVMSnapshot snapshotName=%s", snapshotName);
resp = StorPoolUtil.snapshotDelete(snapshotName, conn);
if (resp.getError() != null) {
err = String.format("Could not delete storpool vm error=%s", resp.getError());
log.error("Could not delete snapshot for vm:" + err);
} else {
// do we need to clean database?
if (snapshotDetailsVO != null) {
vmSnapshotDetailsDao.remove(snapshotDetailsVO.getId());
}
}
if (err != null) {
StorPoolUtil.spLog(
"StorPoolVMSnapshotStrategy.deleteVMSnapshot: delete snapshot=%s of group snapshot for VM=%s failed due to %s",
snapshotName, userVm.getInstanceName(), err);
throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm "
+ userVm.getInstanceName() + " failed due to " + err);
}
}
vmSnapshotDetailsDao.removeDetails(vmSnapshot.getId());
finalizeDelete(vmSnapshotVO, volumeTOs);
vmSnapshotDao.remove(vmSnapshot.getId());
long full_chain_size = 0;
for (VolumeObjectTO volumeTo : volumeTOs) {
publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_DELETE, vmSnapshot, userVm, volumeTo);
full_chain_size += volumeTo.getSize();
}
publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_OFF_PRIMARY, vmSnapshot, userVm, full_chain_size, 0L);
return true;
}
@Override
public boolean revertVMSnapshot(VMSnapshot vmSnapshot) {
log.debug("Revert vm snapshot");
VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId());
if (userVm.getState() == VirtualMachine.State.Running && vmSnapshotVO.getType() == VMSnapshot.Type.Disk) {
throw new CloudRuntimeException("Virtual machine should be in stopped state for revert operation");
}
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.RevertRequested);
} catch (NoTransitionException e) {
throw new CloudRuntimeException(e.getMessage());
}
boolean result = false;
try {
List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
DataStore dataStore = dataStoreManager.getPrimaryDataStore(volumeTOs.get(0).getDataStore().getUuid());
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, storagePool);
for (VolumeObjectTO volumeObjectTO : volumeTOs) {
String err = null;
VMSnapshotDetailsVO snapshotDetailsVO = vmSnapshotDetailsDao.findDetail(vmSnapshot.getId(),
volumeObjectTO.getUuid());
String snapshotName = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDetailsVO.getValue(), true);
if (snapshotName == null) {
err = String.format("Could not find StorPool's snapshot vm snapshot uuid=%s and volume uui=%s",
vmSnapshot.getUuid(), volumeObjectTO.getUuid());
log.error("Could not delete snapshot for vm:" + err);
}
String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(volumeObjectTO.getPath(), true);
VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeObjectTO.getId(), StorPoolUtil.SP_PROVIDER_NAME);
if (detail != null) {
SpApiResponse updateVolumeResponse = StorPoolUtil.volumeUpdateRename(volumeName, "", StorPoolStorageAdaptor.getVolumeNameFromPath(detail.getValue(), false), conn);
if (updateVolumeResponse.getError() != null) {
StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.canHandle - Could not update StorPool's volume %s to it's globalId due to %s", volumeName, updateVolumeResponse.getError().getDescr());
err = String.format("StorpoolVMSnapshotStrategy.canHandle - Could not update StorPool's volume %s to it's globalId due to %s", volumeName, updateVolumeResponse.getError().getDescr());
} else {
volumeDetailsDao.remove(detail.getId());
}
}
SpApiResponse resp = StorPoolUtil.detachAllForced(volumeName, false, conn);
if (resp.getError() != null) {
err = String.format("Could not detach StorPool volume %s from a group snapshot, due to %s",
volumeName, resp.getError());
throw new CloudRuntimeException(err);
}
resp = StorPoolUtil.volumeRevert(volumeName, snapshotName, conn);
if (resp.getError() != null) {
err = String.format("Create Could not complete revert task for volumeName=%s , and snapshotName=%s",
volumeName, snapshotName);
throw new CloudRuntimeException(err);
}
VolumeInfo vinfo = volFactory.getVolume(volumeObjectTO.getId());
if (vinfo.getMaxIops() != null) {
resp = StorPoolUtil.volumeUpadateTags(volumeName, null, vinfo.getMaxIops(), conn, null);
if (resp.getError() != null) {
StorPoolUtil.spLog("Volume was reverted successfully but max iops could not be set due to %s",
resp.getError().getDescr());
}
}
}
finalizeRevert(vmSnapshotVO, volumeTOs);
result = vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded);
} catch (CloudRuntimeException | NoTransitionException e) {
String errMsg = String.format("Error while finalize create vm snapshot [%s] due to %s", vmSnapshot.getName(), e.getMessage());
log.error(errMsg, e);
throw new CloudRuntimeException(errMsg);
} finally {
if (!result) {
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
} catch (NoTransitionException e1) {
log.error("Cannot set vm snapshot state due to: " + e1.getMessage());
}
}
}
return result;
}
private void publishUsageEvents(String type, VMSnapshot vmSnapshot, UserVm userVm, VolumeObjectTO volumeTo) {
VolumeVO volume = volumeDao.findById(volumeTo.getId());
Long diskOfferingId = volume.getDiskOfferingId();
Long offeringId = null;
if (diskOfferingId != null) {
DiskOfferingVO offering = diskOfferingDao.findById(diskOfferingId);
if (offering != null && offering.isComputeOnly()) {
offeringId = offering.getId();
}
}
UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(),
vmSnapshot.getName(), offeringId, volume.getId(), volumeTo.getSize(), VMSnapshot.class.getName(), vmSnapshot.getUuid());
}
private void publishUsageEvents(String type, VMSnapshot vmSnapshot, UserVm userVm, Long vmSnapSize, Long virtualSize) {
try {
UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(),
vmSnapshot.getName(), 0L, 0L, vmSnapSize, virtualSize, VMSnapshot.class.getName(),
vmSnapshot.getUuid());
} catch (Exception e) {
log.error("Failed to publis usage event " + type, e);
}
}
}


@ -0,0 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
name=storage-volume-storpool
parent=storage
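# (These two keys register the plugin with CloudStack's Spring module
# framework; the module is loaded as a child of the "storage" module.)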


@ -0,0 +1,38 @@
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
you under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context-3.0.xsd">
<bean id="storpoolPrimaryDataStoreProvider"
class="org.apache.cloudstack.storage.datastore.provider.StorPoolPrimaryDataStoreProvider" />
<bean id="storpoolSnapshotStrategy"
class="org.apache.cloudstack.storage.snapshot.StorPoolSnapshotStrategy" />
<bean id="storpoolVMSnapshotStrategy"
class="org.apache.cloudstack.storage.snapshot.StorPoolVMSnapshotStrategy" />
<bean id="storpoolConfigManager"
class="org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager" />
<bean id="storpoolDataMotionStrategy"
class="org.apache.cloudstack.storage.motion.StorPoolDataMotionStrategy" />
<bean id="cleanupTags"
class="org.apache.cloudstack.storage.collector.StorPoolAbandonObjectsCollector" />
</beans>


@ -57,10 +57,12 @@ import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationSer
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
@ -2639,6 +2641,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (volume.getPoolId() != null) {
DataStore dataStore = dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary);
volService.revokeAccess(volFactory.getVolume(volume.getId()), host, dataStore);
provideVMInfo(dataStore, vmId, volumeId);
}
if (volumePool != null && hostId != null) {
handleTargetsForVMware(hostId, volumePool.getHostAddress(), volumePool.getPort(), volume.get_iScsiName());
@ -3884,6 +3887,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (attached) {
ev = Volume.Event.OperationSucceeded;
s_logger.debug("Volume: " + volInfo.getName() + " successfully attached to VM: " + volInfo.getAttachedVmName());
provideVMInfo(dataStore, vm.getId(), volInfo.getId());
} else {
s_logger.debug("Volume: " + volInfo.getName() + " failed to attach to VM: " + volInfo.getAttachedVmName());
}
@ -3892,6 +3896,17 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return _volsDao.findById(volumeToAttach.getId());
}
private void provideVMInfo(DataStore dataStore, long vmId, Long volumeId) {
DataStoreDriver dataStoreDriver = dataStore != null ? dataStore.getDriver() : null;
if (dataStoreDriver instanceof PrimaryDataStoreDriver) {
PrimaryDataStoreDriver storageDriver = (PrimaryDataStoreDriver)dataStoreDriver;
if (storageDriver.isVmInfoNeeded()) {
storageDriver.provideVmInfo(vmId, volumeId);
}
}
}
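// Sketch (illustration only, not the StorPool driver's actual code): a
// PrimaryDataStoreDriver opting in to this callback would implement the two
// hooks used above, roughly:
//
//   @Override
//   public boolean isVmInfoNeeded() {
//       return true; // VolumeApiServiceImpl will then call provideVmInfo(...)
//   }
//
//   @Override
//   public void provideVmInfo(long vmId, Long volumeId) {
//       // e.g. record the owning VM's identity on the backing volume
//   }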
private int getMaxDataVolumesSupported(UserVmVO vm) {
Long hostId = vm.getHostId();
if (hostId == null) {


@ -25,8 +25,10 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.persistence.EntityExistsException;
import com.cloud.server.ResourceManagerUtil;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.commons.collections.MapUtils;
import org.apache.log4j.Logger;
@ -40,11 +42,14 @@ import com.cloud.network.vpc.NetworkACLItemVO;
import com.cloud.network.vpc.NetworkACLVO;
import com.cloud.network.vpc.VpcVO;
import com.cloud.projects.ProjectVO;
import com.cloud.server.ResourceManagerUtil;
import com.cloud.server.ResourceTag;
import com.cloud.server.ResourceTag.ResourceObjectType;
import com.cloud.server.TaggedResourceService;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.SnapshotPolicyVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
@ -78,6 +83,10 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso
AccountDao _accountDao;
@Inject
ResourceManagerUtil resourceManagerUtil;
@Inject
VolumeDao volumeDao;
@Inject
DataStoreManager dataStoreMgr;
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
@ -196,6 +205,9 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso
throw new CloudRuntimeException(String.format("tag %s already on %s with id %s", resourceTag.getKey(), resourceType.toString(), resourceId),e);
}
resourceTags.add(resourceTag);
if (ResourceObjectType.UserVm.equals(resourceType)) {
informStoragePoolForVmTags(id, key, value);
}
}
}
}
@ -275,6 +287,9 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso
_resourceTagDao.remove(tagToRemove.getId());
s_logger.debug("Removed the tag '" + tagToRemove + "' for resources (" +
String.join(", ", resourceIds) + ")");
if (ResourceObjectType.UserVm.equals(resourceType)) {
informStoragePoolForVmTags(tagToRemove.getResourceId(), tagToRemove.getKey(), tagToRemove.getValue());
}
}
}
});
@ -292,4 +307,19 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso
List<? extends ResourceTag> listResourceTags = listByResourceTypeAndId(type, resourceId);
return listResourceTags == null ? null : listResourceTags.stream().collect(Collectors.toMap(ResourceTag::getKey, ResourceTag::getValue));
}
private void informStoragePoolForVmTags(long vmId, String key, String value) {
List<VolumeVO> volumeVos = volumeDao.findByInstance(vmId);
for (VolumeVO volume : volumeVos) {
DataStore dataStore = dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary);
if (dataStore == null || !(dataStore.getDriver() instanceof PrimaryDataStoreDriver)) {
continue;
}
PrimaryDataStoreDriver dataStoreDriver = (PrimaryDataStoreDriver) dataStore.getDriver();
if (dataStoreDriver.isVmTagsNeeded(key)) {
dataStoreDriver.provideVmTags(vmId, volume.getId(), value);
}
}
}
}
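Analogously to the VM-info hook above, a driver that wants VM tag updates opts in per key via isVmTagsNeeded(key) and receives the value through provideVmTags(vmId, volumeId, value), matching the call sites in informStoragePoolForVmTags. A minimal sketch under those assumptions (the tag key checked below is invented for illustration; this is not the StorPool driver's real implementation):

import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;

public abstract class TagAwareDriverSketch implements PrimaryDataStoreDriver {
    @Override
    public boolean isVmTagsNeeded(String tagKey) {
        return "SP_QOS".equals(tagKey); // hypothetical key this backend cares about
    }

    @Override
    public void provideVmTags(long vmId, long volumeId, String tagValue) {
        // e.g. propagate the tag value to the VM's StorPool volumes
    }
}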


@ -0,0 +1,439 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
import pprint
import random
import subprocess
import time
import uuid
from marvin.cloudstackAPI import (listOsTypes,
listTemplates,
listHosts,
createTemplate,
createVolume,
resizeVolume,
startVirtualMachine,
migrateVirtualMachine,
migrateVolume
)
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.codes import FAILED, KVM, PASS, XEN_SERVER, RUNNING
from marvin.configGenerator import configuration, cluster
from marvin.lib.base import (Account,
Configurations,
ServiceOffering,
Snapshot,
StoragePool,
Template,
Tag,
VirtualMachine,
VmSnapshot,
Volume,
SecurityGroup,
)
from marvin.lib.common import (get_zone,
get_domain,
get_template,
list_disk_offering,
list_snapshots,
list_storage_pools,
list_volumes,
list_virtual_machines,
list_configurations,
list_service_offering,
list_clusters,
list_zones)
from marvin.lib.utils import random_gen, cleanup_resources, validateList, is_snapshot_on_nfs, isAlmostEqual
from nose.plugins.attrib import attr
from storpool import spapi
from sp_util import (TestData, StorPoolHelper)
class TestMigrateVolumeToAnotherPool(cloudstackTestCase):
@classmethod
def setUpClass(cls):
super(TestMigrateVolumeToAnotherPool, cls).setUpClass()
try:
cls.setUpCloudStack()
except Exception:
cls.cleanUpCloudStack()
raise
@classmethod
def setUpCloudStack(cls):
cls.spapi = spapi.Api(host="10.2.23.248", port="81", auth="6549874687", multiCluster=True)
testClient = super(TestMigrateVolumeToAnotherPool, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls._cleanup = []
cls.unsupportedHypervisor = False
cls.hypervisor = testClient.getHypervisorInfo()
if cls.hypervisor.lower() in ("hyperv", "lxc"):
cls.unsupportedHypervisor = True
return
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = None
zones = list_zones(cls.apiclient)
for z in zones:
if z.name == cls.getClsConfig().mgtSvr[0].zone:
cls.zone = z
assert cls.zone is not None
td = TestData()
cls.testdata = td.testdata
cls.helper = StorPoolHelper()
storpool_primary_storage = cls.testdata[TestData.primaryStorage]
cls.template_name = storpool_primary_storage.get("name")
storpool_service_offerings = cls.testdata[TestData.serviceOffering]
nfs_service_offerings = cls.testdata[TestData.serviceOfferingsPrimary]
ceph_service_offerings = cls.testdata[TestData.serviceOfferingsCeph]
storage_pool = list_storage_pools(
cls.apiclient,
name=cls.template_name
)
nfs_storage_pool = list_storage_pools(
cls.apiclient,
name='nfs'
)
ceph_primary_storage = cls.testdata[TestData.primaryStorage4]
cls.ceph_storage_pool = list_storage_pools(
cls.apiclient,
name=ceph_primary_storage.get("name")
)[0]
service_offerings = list_service_offering(
cls.apiclient,
name=cls.template_name
)
nfs_service_offering = list_service_offering(
cls.apiclient,
name='nfs'
)
ceph_service_offering = list_service_offering(
cls.apiclient,
name=ceph_primary_storage.get("name")
)
disk_offerings = list_disk_offering(
cls.apiclient,
name="ssd"
)
cls.disk_offerings = disk_offerings[0]
if storage_pool is None:
storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
else:
storage_pool = storage_pool[0]
cls.storage_pool = storage_pool
if service_offerings is None:
service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings)
else:
service_offerings = service_offerings[0]
if nfs_service_offering is None:
nfs_service_offering = ServiceOffering.create(cls.apiclient, nfs_service_offerings)
else:
nfs_service_offering = nfs_service_offering[0]
if ceph_service_offering is None:
ceph_service_offering = ServiceOffering.create(cls.apiclient, ceph_service_offerings)
else:
ceph_service_offering = ceph_service_offering[0]
#The version of CentOS has to be supported
template = get_template(
cls.apiclient,
cls.zone.id,
account = "system"
)
cls.nfs_storage_pool = nfs_storage_pool[0]
if cls.nfs_storage_pool.state == "Maintenance":
cls.nfs_storage_pool = StoragePool.cancelMaintenance(cls.apiclient, cls.nfs_storage_pool.id)
if cls.ceph_storage_pool.state == "Maintenance":
cls.ceph_storage_pool = StoragePool.cancelMaintenance(cls.apiclient, cls.ceph_storage_pool.id)
cls.account = cls.helper.create_account(
cls.apiclient,
cls.services["account"],
accounttype = 1,
domainid=cls.domain.id,
roleid = 1
)
cls._cleanup.append(cls.account)
securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0]
cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id)
cls.vm = VirtualMachine.create(cls.apiclient,
{"name":"StorPool-%s" % uuid.uuid4() },
zoneid=cls.zone.id,
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=nfs_service_offering.id,
hypervisor=cls.hypervisor,
rootdisksize=10
)
cls.vm2 = VirtualMachine.create(cls.apiclient,
{"name":"StorPool-%s" % uuid.uuid4() },
zoneid=cls.zone.id,
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=nfs_service_offering.id,
hypervisor= cls.hypervisor,
rootdisksize=10
)
cls.vm3 = VirtualMachine.create(cls.apiclient,
{"name":"StorPool-%s" % uuid.uuid4() },
zoneid=cls.zone.id,
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=nfs_service_offering.id,
hypervisor= cls.hypervisor,
rootdisksize=10
)
cls.vm4 = VirtualMachine.create(cls.apiclient,
{"name":"StorPool-%s" % uuid.uuid4() },
zoneid=cls.zone.id,
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=ceph_service_offering.id,
hypervisor= cls.hypervisor,
rootdisksize=10
)
cls.vm5 = VirtualMachine.create(cls.apiclient,
{"name":"StorPool-%s" % uuid.uuid4() },
zoneid=cls.zone.id,
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=ceph_service_offering.id,
hypervisor= cls.hypervisor,
rootdisksize=10
)
cls.storage_pool = StoragePool.update(cls.apiclient,
id=cls.storage_pool.id,
tags = ["ssd, nfs"])
if template == FAILED:
assert False, "get_template() failed to return template\
with description %s" % cls.services["ostype"]
cls.services["domainid"] = cls.domain.id
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["templates"]["ostypeid"] = template.ostypeid
cls.services["zoneid"] = cls.zone.id
cls.service_offering = service_offerings
cls.nfs_service_offering = nfs_service_offering
cls.template = template
cls.random_data_0 = random_gen(size=100)
cls.test_dir = "/tmp"
cls.random_data = "random.data"
return
@classmethod
def tearDownClass(cls):
cls.cleanUpCloudStack()
@classmethod
def cleanUpCloudStack(cls):
try:
if cls.nfs_storage_pool.state != "Maintenance":
cls.nfs_storage_pool = StoragePool.enableMaintenance(cls.apiclient, cls.nfs_storage_pool.id)
if cls.ceph_storage_pool.state != "Maintenance":
cls.ceph_storage_pool = StoragePool.enableMaintenance(cls.apiclient, cls.ceph_storage_pool.id)
cls.storage_pool = StoragePool.update(cls.apiclient,
id=cls.storage_pool.id,
tags = ["ssd"])
# Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
if self.unsupportedHypervisor:
self.skipTest("Skipping test because unsupported hypervisor\
%s" % self.hypervisor)
return
def tearDown(self):
return
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_1_migrate_vm_from_nfs_to_storpool(self):
''' Test migrate virtual machine from NFS primary storage to StorPool'''
self.vm.stop(self.apiclient, forced=True)
cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
cmd.virtualmachineid = self.vm.id
cmd.storageid = self.storage_pool.id
migrated_vm = self.apiclient.migrateVirtualMachine(cmd)
volumes = list_volumes(
self.apiclient,
virtualmachineid = migrated_vm.id,
listall=True
)
for v in volumes:
name = v.path.split("/")[3]
try:
sp_volume = self.spapi.volumeList(volumeName="~" + name)
except spapi.ApiError as err:
raise Exception(err)
self.assertEqual(v.storageid, self.storage_pool.id, "Did not migrate virtual machine from NFS to StorPool")
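# Note: the checks above (and in the tests below) repeat one pattern: take
# the fourth component of the volume's device path as the StorPool global
# id and look the volume up with a "~" prefix. A helper capturing it could
# look like this (a sketch, assuming the /dev/storpool-byid/<globalId>
# path layout these assertions rely on):
#
#   def sp_volume_for(api, volume):
#       global_id = volume.path.split("/")[3]
#       return api.volumeList(volumeName="~" + global_id)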
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_2_migrate_volume_from_nfs_to_storpool(self):
''' Test migrate volume from NFS primary storage to StorPool'''
self.vm2.stop(self.apiclient, forced=True)
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.vm2.id,
listall=True
)
for v in volumes:
cmd = migrateVolume.migrateVolumeCmd()
cmd.storageid = self.storage_pool.id
cmd.volumeid = v.id
volume = self.apiclient.migrateVolume(cmd)
self.assertEqual(volume.storageid, self.storage_pool.id, "Did not migrate volume from NFS to StorPool")
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.vm2.id,
listall=True
)
for v in volumes:
name = v.path.split("/")[3]
try:
sp_volume = self.spapi.volumeList(volumeName="~" + name)
except spapi.ApiError as err:
raise Exception(err)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_3_migrate_volume_from_nfs_to_storpool(self):
'''Test write on disk before migrating volume from NFS primary storage
Check that data is on disk after migration'''
try:
# Login to VM and write data to file system
ssh_client = self.vm3.get_ssh_client(reconnect = True)
cmds = [
"echo %s > %s/%s" %
(self.random_data_0, self.test_dir, self.random_data),
"sync",
"sleep 1",
"sync",
"sleep 1",
"cat %s/%s" %
(self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.vm3.ipaddress)
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data has be write into temp file!"
)
self.vm3.stop(self.apiclient, forced=True)
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.vm3.id,
listall=True
)
time.sleep(30)
for v in volumes:
cmd = migrateVolume.migrateVolumeCmd()
cmd.storageid = self.storage_pool.id
cmd.volumeid = v.id
volume = self.apiclient.migrateVolume(cmd)
self.assertEqual(volume.storageid, self.storage_pool.id, "Did not migrate volume from NFS to StorPool")
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.vm3.id,
listall=True
)
for v in volumes:
name = v.path.split("/")[3]
try:
sp_volume = self.spapi.volumeList(volumeName="~" + name)
except spapi.ApiError as err:
raise Exception(err)
self.vm3.start(self.apiclient)
try:
ssh_client = self.vm3.get_ssh_client(reconnect=True)
cmds = [
"cat %s/%s" % (self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.vm3.ipaddress)
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
)

File diff suppressed because it is too large


@ -0,0 +1,576 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
import pprint
import random
import subprocess
import time
import json
from marvin.cloudstackAPI import (listOsTypes,
listTemplates,
listHosts,
createTemplate,
createVolume,
resizeVolume,
revertSnapshot,
startVirtualMachine)
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.codes import FAILED, KVM, PASS, XEN_SERVER, RUNNING
from marvin.configGenerator import configuration, cluster
from marvin.lib.base import (Account,
Configurations,
ServiceOffering,
Snapshot,
StoragePool,
Template,
Tag,
VirtualMachine,
VmSnapshot,
Volume,
SecurityGroup,
)
from marvin.lib.common import (get_zone,
get_domain,
get_template,
list_disk_offering,
list_snapshots,
list_storage_pools,
list_volumes,
list_virtual_machines,
list_configurations,
list_service_offering,
list_clusters,
list_zones)
from marvin.lib.utils import random_gen, cleanup_resources, validateList, is_snapshot_on_nfs, isAlmostEqual
from nose.plugins.attrib import attr
from storpool import spapi
import uuid
from sp_util import (TestData, StorPoolHelper)
class TestStoragePool(cloudstackTestCase):
@classmethod
def setUpClass(cls):
super(TestStoragePool, cls).setUpClass()
try:
cls.setUpCloudStack()
except Exception:
cls.cleanUpCloudStack()
raise
@classmethod
def setUpCloudStack(cls):
cls.spapi = spapi.Api(host="10.2.23.248", port="81", auth="6549874687", multiCluster=True)
testClient = super(TestStoragePool, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.unsupportedHypervisor = False
cls.hypervisor = testClient.getHypervisorInfo()
if cls.hypervisor.lower() in ("hyperv", "lxc"):
cls.unsupportedHypervisor = True
return
cls._cleanup = []
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = None
zones = list_zones(cls.apiclient)
for z in zones:
if z.name == cls.getClsConfig().mgtSvr[0].zone:
cls.zone = z
assert cls.zone is not None
td = TestData()
cls.testdata = td.testdata
cls.helper = StorPoolHelper()
cls.account = cls.helper.create_account(
cls.apiclient,
cls.services["account"],
accounttype = 1,
domainid=cls.domain.id,
roleid = 1
)
cls._cleanup.append(cls.account)
securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0]
cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id)
storpool_primary_storage = cls.testdata[TestData.primaryStorage]
storpool_service_offerings = cls.testdata[TestData.serviceOffering]
cls.template_name = storpool_primary_storage.get("name")
storage_pool = list_storage_pools(
cls.apiclient,
name=cls.template_name
)
service_offerings = list_service_offering(
cls.apiclient,
name=cls.template_name
)
disk_offerings = list_disk_offering(
cls.apiclient,
name="ssd"
)
cls.disk_offerings = disk_offerings[0]
if storage_pool is None:
storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
else:
storage_pool = storage_pool[0]
cls.storage_pool = storage_pool
cls.debug(pprint.pformat(storage_pool))
if service_offerings is None:
service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings)
else:
service_offerings = service_offerings[0]
#The version of CentOS has to be supported
template = get_template(
cls.apiclient,
cls.zone.id,
account = "system"
)
cls.debug(pprint.pformat(template))
cls.debug(pprint.pformat(cls.hypervisor))
if template == FAILED:
assert False, "get_template() failed to return template\
with description %s" % cls.services["ostype"]
cls.services["domainid"] = cls.domain.id
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["templates"]["ostypeid"] = template.ostypeid
cls.services["zoneid"] = cls.zone.id
cls.services["diskofferingid"] = cls.disk_offerings.id
cls.service_offering = service_offerings
cls.debug(pprint.pformat(cls.service_offering))
cls.volume_1 = Volume.create(
cls.apiclient,
cls.services,
account=cls.account.name,
domainid=cls.account.domainid,
size=5
)
cls.volume_2 = Volume.create(
cls.apiclient,
cls.services,
account=cls.account.name,
domainid=cls.account.domainid,
size=5
)
cls.volume = Volume.create(
cls.apiclient,
cls.services,
account=cls.account.name,
domainid=cls.account.domainid,
size=5
)
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
{"name":"StorPool-%s" % uuid.uuid4() },
zoneid=cls.zone.id,
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
hypervisor=cls.hypervisor,
rootdisksize=10
)
cls.virtual_machine2 = VirtualMachine.create(
cls.apiclient,
{"name":"StorPool-%s" % uuid.uuid4() },
zoneid=cls.zone.id,
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
hypervisor=cls.hypervisor,
rootdisksize=10
)
cls.template = template
cls.random_data_0 = random_gen(size=100)
cls.test_dir = "/tmp"
cls.random_data = "random.data"
return
@classmethod
def tearDownClass(cls):
cls.cleanUpCloudStack()
@classmethod
def cleanUpCloudStack(cls):
try:
# Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
if self.unsupportedHypervisor:
self.skipTest("Skipping test because unsupported hypervisor\
%s" % self.hypervisor)
return
def tearDown(self):
return
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_01_set_vcpolicy_tag_to_vm_with_attached_disks(self):
''' Test set vc-policy tag to VM with one attached disk
'''
volume_attached = self.virtual_machine.attach_volume(
self.apiclient,
self.volume_1
)
tag = Tag.create(
self.apiclient,
resourceIds=self.virtual_machine.id,
resourceType='UserVm',
tags={'vc-policy': 'testing_vc-policy'}
)
vm = list_virtual_machines(self.apiclient,id = self.virtual_machine.id, listall=True)
vm_tags = vm[0].tags
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine.id, listall=True
)
self.vc_policy_tags(volumes, vm_tags, vm)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_02_set_vcpolicy_tag_to_attached_disk(self):
""" Test set vc-policy tag to new disk attached to VM"""
volume_attached = self.virtual_machine.attach_volume(
self.apiclient,
self.volume_2
)
volume = list_volumes(self.apiclient, id = volume_attached.id, listall=True)
name = volume[0].path.split("/")[3]
sp_volume = self.spapi.volumeList(volumeName="~" + name)
vm = list_virtual_machines(self.apiclient,id = self.virtual_machine.id, listall=True)
vm_tags = vm[0].tags
for vm_tag in vm_tags:
for sp_tag in sp_volume[0].tags:
if sp_tag == vm_tag.key:
self.assertEqual(sp_tag, vm_tag.key, "StorPool tag is not the same as the Virtual Machine tag")
self.assertEqual(sp_volume[0].tags[sp_tag], vm_tag.value, "StorPool tag value is not the same as the Virtual Machine tag value")
if sp_tag == 'cvm':
self.assertEqual(sp_volume[0].tags[sp_tag], vm[0].id, "cvm tag is not the expected value")
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_03_create_vm_snapshot_vc_policy_tag(self):
"""Test to create VM snapshots with VC policy tags
"""
volume_attached = self.virtual_machine.attach_volume(
self.apiclient,
self.volume
)
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine.id,
listall=True)
vm = list_virtual_machines(self.apiclient,id = self.virtual_machine.id, listall=True)
vm_tags = vm[0].tags
self.vc_policy_tags(volumes, vm_tags, vm)
self.assertEqual(volume_attached.id, self.volume.id, "Not the same volume")
try:
# Login to VM and write data to file system
ssh_client = self.virtual_machine.get_ssh_client()
cmds = [
"echo %s > %s/%s" %
(self.random_data_0, self.test_dir, self.random_data),
"sync",
"sleep 1",
"sync",
"sleep 1",
"cat %s/%s" %
(self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine.ipaddress)
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data has be write into temp file!"
)
time.sleep(30)
MemorySnapshot = False
vm_snapshot = VmSnapshot.create(
self.apiclient,
self.virtual_machine.id,
MemorySnapshot,
"TestSnapshot",
"Display Text"
)
self.assertEqual(
vm_snapshot.state,
"Ready",
"Check the snapshot of vm is ready!"
)
return
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_04_revert_vm_snapshots_vc_policy_tag(self):
"""Test to revert VM snapshots with VC policy tag
"""
try:
ssh_client = self.virtual_machine.get_ssh_client()
cmds = [
"rm -rf %s/%s" % (self.test_dir, self.random_data),
"ls %s/%s" % (self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine.ipaddress)
if str(result[0]).find("No such file or directory") == -1:
self.fail("Check the random data has been deleted from the temp file!")
time.sleep(30)
list_snapshot_response = VmSnapshot.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
listall=True)
self.assertEqual(
isinstance(list_snapshot_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_snapshot_response,
None,
"Check if snapshot exists in ListSnapshot"
)
self.assertEqual(
list_snapshot_response[0].state,
"Ready",
"Check the snapshot of vm is ready!"
)
self.virtual_machine.stop(self.apiclient, forced=True)
VmSnapshot.revertToSnapshot(
self.apiclient,
list_snapshot_response[0].id
)
self.virtual_machine.start(self.apiclient)
try:
ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
cmds = [
"cat %s/%s" % (self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine.ipaddress)
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine.id, listall=True
)
vm = list_virtual_machines(self.apiclient,id = self.virtual_machine.id, listall=True)
vm_tags = vm[0].tags
self.vc_policy_tags(volumes, vm_tags, vm)
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_05_delete_vm_snapshots(self):
"""Test to delete vm snapshots
"""
list_snapshot_response = VmSnapshot.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
listall=True)
self.assertEqual(
isinstance(list_snapshot_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_snapshot_response,
None,
"Check if snapshot exists in ListSnapshot"
)
VmSnapshot.deleteVMSnapshot(
self.apiclient,
list_snapshot_response[0].id)
time.sleep(30)
list_snapshot_response = VmSnapshot.list(
self.apiclient,
#vmid=self.virtual_machine.id,
virtualmachineid=self.virtual_machine.id,
listall=False)
self.debug('list_snapshot_response -------------------- %s' % list_snapshot_response)
self.assertIsNone(list_snapshot_response, "VM snapshot was not deleted")
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_06_remove_vcpolicy_tag_when_disk_detached(self):
""" Test remove vc-policy tag to disk detached from VM"""
time.sleep(60)
volume_detached = self.virtual_machine.detach_volume(
self.apiclient,
self.volume_2
)
vm = list_virtual_machines(self.apiclient,id = self.virtual_machine.id, listall=True)
vm_tags = vm[0].tags
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine.id, listall=True
)
self.vc_policy_tags(volumes, vm_tags, vm)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_07_delete_vcpolicy_tag(self):
""" Test delete vc-policy tag of VM"""
Tag.delete(self.apiclient,
resourceIds=self.virtual_machine.id,
resourceType='UserVm',
tags={'vc-policy': 'testing_vc-policy'})
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine.id, listall=True
)
for v in volumes:
name = v.path.split("/")[3]
spvolume = self.spapi.volumeList(volumeName="~" + name)
tags = spvolume[0].tags
for t in tags:
self.assertFalse(t.lower() == 'vc-policy'.lower(), "The vc-policy tag was not removed")
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_08_vcpolicy_tag_to_reverted_disk(self):
tag = Tag.create(
self.apiclient,
resourceIds=self.virtual_machine2.id,
resourceType='UserVm',
tags={'vc-policy': 'testing_vc-policy'}
)
vm = list_virtual_machines(self.apiclient,id = self.virtual_machine2.id, listall=True)
vm_tags = vm[0].tags
volume = Volume.list(
self.apiclient,
virtualmachineid = self.virtual_machine2.id, listall=True,
type = "ROOT"
)
self.vc_policy_tags(volume, vm_tags, vm)
snapshot = Snapshot.create(
self.apiclient,
volume[0].id,
account=self.account.name,
domainid=self.account.domainid
)
self.virtual_machine2.stop(
self.apiclient,
forced=True
)
cmd = revertSnapshot.revertSnapshotCmd()
cmd.id = snapshot.id
self.apiclient.revertSnapshot(cmd)
vm = list_virtual_machines(self.apiclient,id = self.virtual_machine2.id)
vm_tags = vm[0].tags
vol = list_volumes(self.apiclient, id = snapshot.volumeid, listall=True)
self.vc_policy_tags(vol, vm_tags, vm)
def vc_policy_tags(self, volumes, vm_tags, vm):
flag = False
for v in volumes:
name = v.path.split("/")[3]
spvolume = self.spapi.volumeList(volumeName="~" + name)
tags = spvolume[0].tags
for t in tags:
for vm_tag in vm_tags:
if t == vm_tag.key:
flag = True
self.assertEqual(tags[t], vm_tag.value, "Tags are not equal")
if t == 'cvm':
self.assertEqual(tags[t], vm[0].id, "CVM tag is not the same as vm UUID")
self.assertTrue(flag, "No volume carries the VM tags")
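# For reference, the StorPool side of vc_policy_tags(): the API returns volume
# tags as a dict, so checking that a VM tag propagated reduces to a lookup (a
# sketch, assuming the same spapi client created in setUpCloudStack()):
#   sp_volume = cls.spapi.volumeList(volumeName="~" + name)
#   tags = sp_volume[0].tags            # e.g. {'vc-policy': '...', 'cvm': '<vm uuid>'}
#   assert tags.get('cvm') == vm[0].id  # the 'cvm' tag carries the VM UUID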

View File

@ -0,0 +1,369 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
import random
import time
from marvin.cloudstackAPI import (listTemplates)
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.codes import FAILED, KVM, PASS, XEN_SERVER, RUNNING
from marvin.lib.base import (Account,
ServiceOffering,
VirtualMachine,
VmSnapshot,
User,
Volume,
SecurityGroup,
)
from marvin.lib.common import (get_zone,
get_domain,
get_template,
list_clusters,
list_snapshots,
list_virtual_machines,
list_configurations,
list_disk_offering,
list_accounts,
list_storage_pools,
list_service_offering,
list_zones
)
from marvin.lib.utils import random_gen, cleanup_resources, validateList, is_snapshot_on_nfs, isAlmostEqual, get_hypervisor_type
from nose.plugins.attrib import attr
import uuid
from sp_util import (TestData, StorPoolHelper)
class TestVmSnapshot(cloudstackTestCase):
@classmethod
def setUpClass(cls):
super(TestVmSnapshot, cls).setUpClass()
try:
cls.setUpCloudStack()
except Exception:
cls.cleanUpCloudStack()
raise
@classmethod
def setUpCloudStack(cls):
testClient = super(TestVmSnapshot, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls._cleanup = []
cls.unsupportedHypervisor = False
# Setup test data
td = TestData()
cls.testdata = td.testdata
cls.helper = StorPoolHelper()
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = None
zones = list_zones(cls.apiclient)
for z in zones:
if z.name == cls.getClsConfig().mgtSvr[0].zone:
cls.zone = z
assert cls.zone is not None
cls.cluster = list_clusters(cls.apiclient)[0]
cls.hypervisor = get_hypervisor_type(cls.apiclient)
#The version of CentOS has to be supported
template = get_template(
cls.apiclient,
cls.zone.id,
account = "system"
)
if template == FAILED:
assert False, "get_template() failed to return template\
with description %s" % cls.services["ostype"]
cls.template = template
cls.account = cls.helper.create_account(
cls.apiclient,
cls.services["account"],
accounttype = 1,
domainid=cls.domain.id,
roleid = 1
)
cls._cleanup.append(cls.account)
securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0]
cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id)
primarystorage = cls.testdata[TestData.primaryStorage]
serviceOffering = cls.testdata[TestData.serviceOffering]
storage_pool = list_storage_pools(
cls.apiclient,
name = primarystorage.get("name")
)
cls.primary_storage = storage_pool[0]
disk_offering = list_disk_offering(
cls.apiclient,
name="ssd"
)
assert disk_offering is not None
service_offering_only = list_service_offering(
cls.apiclient,
name="ssd"
)
if service_offering_only is not None:
cls.service_offering_only = service_offering_only[0]
else:
cls.service_offering_only = ServiceOffering.create(
cls.apiclient,
serviceOffering)
assert cls.service_offering_only is not None
cls.disk_offering = disk_offering[0]
# Create 1 data volume_1
cls.volume = Volume.create(
cls.apiclient,
cls.testdata[TestData.volume_1],
account=cls.account.name,
domainid=cls.domain.id,
zoneid=cls.zone.id,
diskofferingid=cls.disk_offering.id,
size=10
)
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
{"name":"StorPool-%s" % uuid.uuid4() },
zoneid=cls.zone.id,
templateid=cls.template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering_only.id,
hypervisor=cls.hypervisor,
rootdisksize=10
)
cls.random_data_0 = random_gen(size=100)
cls.test_dir = "/tmp"
cls.random_data = "random.data"
return
@classmethod
def tearDownClass(cls):
cls.cleanUpCloudStack()
@classmethod
def cleanUpCloudStack(cls):
try:
# Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
if self.unsupportedHypervisor:
self.skipTest("Skipping test because unsupported hypervisor\
%s" % self.hypervisor)
return
def tearDown(self):
return
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_01_create_vm_snapshots(self):
"""Test to create VM snapshots
"""
volume_attached = self.virtual_machine.attach_volume(
self.apiclient,
self.volume
)
self.assertEqual(volume_attached.id, self.volume.id, "Not the same volume")
try:
# Login to VM and write data to file system
ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
cmds = [
"echo %s > %s/%s" %
(self.random_data_0, self.test_dir, self.random_data),
"sync",
"sleep 1",
"sync",
"sleep 1",
"cat %s/%s" %
(self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine.ipaddress)
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data has be write into temp file!"
)
time.sleep(30)
MemorySnapshot = False
vm_snapshot = VmSnapshot.create(
self.apiclient,
self.virtual_machine.id,
MemorySnapshot,
"TestSnapshot",
"Display Text"
)
self.assertEqual(
vm_snapshot.state,
"Ready",
"Check the snapshot of vm is ready!"
)
return
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_02_revert_vm_snapshots(self):
"""Test to revert VM snapshots
"""
try:
ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
cmds = [
"rm -rf %s/%s" % (self.test_dir, self.random_data),
"ls %s/%s" % (self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine.ipaddress)
if str(result[0]).find("No such file or directory") == -1:
self.fail("Check the random data has been deleted from the temp file!")
time.sleep(30)
list_snapshot_response = VmSnapshot.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
listall=True)
self.assertEqual(
isinstance(list_snapshot_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_snapshot_response,
None,
"Check if snapshot exists in ListSnapshot"
)
self.assertEqual(
list_snapshot_response[0].state,
"Ready",
"Check the snapshot of vm is ready!"
)
self.virtual_machine.stop(self.apiclient, forced=True)
VmSnapshot.revertToSnapshot(
self.apiclient,
list_snapshot_response[0].id
)
self.virtual_machine.start(self.apiclient)
try:
ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
cmds = [
"cat %s/%s" % (self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine.ipaddress)
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_03_delete_vm_snapshots(self):
"""Test to delete vm snapshots
"""
list_snapshot_response = VmSnapshot.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
listall=True)
self.assertEqual(
isinstance(list_snapshot_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_snapshot_response,
None,
"Check if snapshot exists in ListSnapshot"
)
VmSnapshot.deleteVMSnapshot(
self.apiclient,
list_snapshot_response[0].id)
time.sleep(30)
list_snapshot_response = VmSnapshot.list(
self.apiclient,
#vmid=self.virtual_machine.id,
virtualmachineid=self.virtual_machine.id,
listall=False)
self.debug('list_snapshot_response -------------------- %s' % list_snapshot_response)
self.assertIsNone(list_snapshot_response, "VM snapshot was not deleted")

View File

@ -0,0 +1,748 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.codes import FAILED, KVM, PASS, XEN_SERVER, RUNNING
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import random_gen, cleanup_resources, validateList, is_snapshot_on_nfs, isAlmostEqual
from marvin.lib.base import (Account,
Cluster,
Configurations,
ServiceOffering,
Snapshot,
StoragePool,
Template,
VirtualMachine,
VmSnapshot,
Volume)
from marvin.lib.common import (get_zone,
get_domain,
get_template,
list_disk_offering,
list_hosts,
list_snapshots,
list_storage_pools,
list_volumes,
list_virtual_machines,
list_configurations,
list_service_offering,
list_clusters,
list_zones)
from marvin.cloudstackAPI import (listOsTypes,
listTemplates,
listHosts,
createTemplate,
createVolume,
getVolumeSnapshotDetails,
resizeVolume,
authorizeSecurityGroupIngress,
migrateVirtualMachineWithVolume,
destroyVirtualMachine,
deployVirtualMachine,
createAccount,
startVirtualMachine,
)
import time
import pprint
import random
from marvin.configGenerator import configuration
import uuid
import logging
import subprocess
import json
from storpool import spapi
from storpool import sptypes
class TestData():
account = "account"
capacityBytes = "capacitybytes"
capacityIops = "capacityiops"
clusterId = "clusterId"
diskName = "diskname"
diskOffering = "diskoffering"
diskOffering2 = "diskoffering2"
cephDiskOffering = "cephDiskOffering"
nfsDiskOffering = "nfsDiskOffering"
domainId = "domainId"
hypervisor = "hypervisor"
login = "login"
mvip = "mvip"
password = "password"
port = "port"
primaryStorage = "primarystorage"
primaryStorage2 = "primarystorage2"
primaryStorage3 = "primarystorage3"
primaryStorage4 = "primaryStorage4"
provider = "provider"
serviceOffering = "serviceOffering"
serviceOfferingssd2 = "serviceOffering-ssd2"
serviceOfferingsPrimary = "serviceOfferingsPrimary"
serviceOfferingsIops = "serviceOfferingsIops"
serviceOfferingsCeph = "serviceOfferingsCeph"
scope = "scope"
StorPool = "StorPool"
storageTag = ["ssd", "ssd2"]
tags = "tags"
virtualMachine = "virtualmachine"
virtualMachine2 = "virtualmachine2"
volume_1 = "volume_1"
volume_2 = "volume_2"
volume_3 = "volume_3"
volume_4 = "volume_4"
volume_5 = "volume_5"
volume_6 = "volume_6"
volume_7 = "volume_7"
zoneId = "zoneId"
def __init__(self):
sp_template_1 = 'ssd'
sp_template_2 = 'ssd2'
sp_template_3 = 'test-primary'
self.testdata = {
TestData.primaryStorage: {
"name": sp_template_1,
TestData.scope: "ZONE",
"url": sp_template_1,
TestData.provider: "StorPool",
"path": "/dev/storpool",
TestData.capacityBytes: 2251799813685248,
TestData.hypervisor: "KVM"
},
TestData.primaryStorage2: {
"name": sp_template_2,
TestData.scope: "ZONE",
"url": sp_template_2,
TestData.provider: "StorPool",
"path": "/dev/storpool",
TestData.capacityBytes: 2251799813685248,
TestData.hypervisor: "KVM"
},
TestData.primaryStorage3: {
"name": sp_template_3,
TestData.scope: "ZONE",
"url": sp_template_3,
TestData.provider: "StorPool",
"path": "/dev/storpool",
TestData.capacityBytes: 2251799813685248,
TestData.hypervisor: "KVM"
},
TestData.primaryStorage4: {
"name": "ceph",
TestData.scope: "ZONE",
TestData.provider: "RBD",
TestData.hypervisor: "KVM"
},
TestData.virtualMachine: {
"name": "TestVM",
"displayname": "TestVM",
"privateport": 22,
"publicport": 22,
"protocol": "tcp"
},
TestData.virtualMachine2: {
"name": "TestVM2",
"displayname": "TestVM2",
"privateport": 22,
"publicport": 22,
"protocol": "tcp"
},
TestData.serviceOffering:{
"name": sp_template_1,
"displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
"cpunumber": 1,
"cpuspeed": 500,
"memory": 512,
"storagetype": "shared",
"customizediops": False,
"hypervisorsnapshotreserve": 200,
"tags": sp_template_1
},
TestData.serviceOfferingssd2:{
"name": sp_template_2,
"displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
"cpunumber": 1,
"cpuspeed": 500,
"memory": 512,
"storagetype": "shared",
"customizediops": False,
"hypervisorsnapshotreserve": 200,
"tags": sp_template_2
},
TestData.serviceOfferingsPrimary:{
"name": "nfs",
"displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
"cpunumber": 1,
"cpuspeed": 500,
"memory": 512,
"storagetype": "shared",
"customizediops": False,
"hypervisorsnapshotreserve": 200,
"tags": "nfs"
},
TestData.serviceOfferingsCeph:{
"name": "ceph",
"displaytext": "Ceph Service offerings",
"cpunumber": 1,
"cpuspeed": 500,
"memory": 512,
"storagetype": "shared",
"customizediops": False,
"hypervisorsnapshotreserve": 200,
"tags": "ceph"
},
TestData.serviceOfferingsIops:{
"name": "iops",
"displaytext": "Testing IOPS on StorPool",
"cpunumber": 1,
"cpuspeed": 500,
"memory": 512,
"storagetype": "shared",
"customizediops": True,
"tags": sp_template_1,
},
TestData.diskOffering: {
"name": "SP_DO_1",
"displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
"disksize": 5,
"customizediops": False,
"miniops": 300,
"maxiops": 500,
"hypervisorsnapshotreserve": 200,
TestData.tags: sp_template_1,
"storagetype": "shared"
},
TestData.diskOffering2: {
"name": "SP_DO_1",
"displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
"disksize": 5,
"customizediops": False,
"miniops": 300,
"maxiops": 500,
"hypervisorsnapshotreserve": 200,
TestData.tags: sp_template_2,
"storagetype": "shared"
},
TestData.cephDiskOffering: {
"name": "ceph",
"displaytext": "Ceph fixed disk offering",
"disksize": 5,
"customizediops": False,
"miniops": 300,
"maxiops": 500,
"hypervisorsnapshotreserve": 200,
TestData.tags: "ceph",
"storagetype": "shared"
},
TestData.nfsDiskOffering: {
"name": "nfs",
"displaytext": "NFS fixed disk offering",
"disksize": 5,
"customizediops": False,
"miniops": 300,
"maxiops": 500,
"hypervisorsnapshotreserve": 200,
TestData.tags: "nfs",
"storagetype": "shared"
},
TestData.volume_1: {
TestData.diskName: "test-volume-1",
},
TestData.volume_2: {
TestData.diskName: "test-volume-2",
},
TestData.volume_3: {
TestData.diskName: "test-volume-3",
},
TestData.volume_4: {
TestData.diskName: "test-volume-4",
},
TestData.volume_5: {
TestData.diskName: "test-volume-5",
},
TestData.volume_6: {
TestData.diskName: "test-volume-6",
},
TestData.volume_7: {
TestData.diskName: "test-volume-7",
},
}
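# Note: in the dictionaries above the primary storage "url" is simply the
# StorPool template name (e.g. "ssd") and "path" is the fixed "/dev/storpool"
# device directory. A sketch of registering such a pool from this test data:
#   td = TestData()
#   pool = StoragePool.create(apiclient,
#                             td.testdata[TestData.primaryStorage],
#                             zoneid=zone.id)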
class StorPoolHelper():
@classmethod
def create_template_from_snapshot(self, apiclient, services, snapshotid=None, volumeid=None):
"""Create template from Volume"""
# Create template from Virtual machine and Volume ID
cmd = createTemplate.createTemplateCmd()
cmd.displaytext = "StorPool_Template"
cmd.name = "-".join(["StorPool-", random_gen()])
if "ostypeid" in services:
cmd.ostypeid = services["ostypeid"]
elif "ostype" in services:
# Find OSTypeId from Os type
sub_cmd = listOsTypes.listOsTypesCmd()
sub_cmd.description = services["ostype"]
ostypes = apiclient.listOsTypes(sub_cmd)
if not isinstance(ostypes, list):
raise Exception(
"Unable to find Ostype id with desc: %s" %
services["ostype"])
cmd.ostypeid = ostypes[0].id
else:
raise Exception(
"Unable to find Ostype is required for creating template")
cmd.isfeatured = True
cmd.ispublic = True
cmd.isextractable = False
if snapshotid:
cmd.snapshotid = snapshotid
if volumeid:
cmd.volumeid = volumeid
return Template(apiclient.createTemplate(cmd).__dict__)
@classmethod
def getCfgFromUrl(self, url):
cfg = dict([
option.split('=')
for option in url.split(';')
])
host, port = cfg['SP_API_HTTP'].split(':')
auth = cfg['SP_AUTH_TOKEN']
return host, int(port), auth
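# A hedged usage sketch: getCfgFromUrl() parses the StorPool endpoint format
# "SP_API_HTTP=<host>:<port>;SP_AUTH_TOKEN=<token>" (values below are
# hypothetical):
#   host, port, auth = StorPoolHelper.getCfgFromUrl(
#       "SP_API_HTTP=10.0.0.1:81;SP_AUTH_TOKEN=1234")
#   # -> ("10.0.0.1", 81, "1234")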
@classmethod
def get_remote_storpool_cluster(cls):
logging.debug("######################## get_remote_storpool_cluster")
storpool_clusterid = subprocess.check_output(['storpool_confshow', 'CLUSTER_ID']).decode().strip()
clusterid = storpool_clusterid.split("=")[1].split(".")[1]
logging.debug("######################## %s" % storpool_clusterid)
cmd = ["storpool", "-j", "cluster", "list"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout.read()
csl = json.loads(proc)
logging.debug("######################## %s" % csl)
clusters = csl.get("data").get("clusters")
logging.debug("######################## %s" % clusters)
for c in clusters:
c_id = c.get("id")
if c_id != clusterid:
return c.get("name")
@classmethod
def get_local_cluster(cls, apiclient, zoneid):
storpool_clusterid = subprocess.check_output(['storpool_confshow', 'CLUSTER_ID']).decode()
clusterid = storpool_clusterid.split("=")
logging.debug(storpool_clusterid)
clusters = list_clusters(apiclient, zoneid = zoneid)
for c in clusters:
configuration = list_configurations(
apiclient,
clusterid = c.id
)
for conf in configuration:
if conf.name == 'sp.cluster.id' and (conf.value in clusterid[1]):
return c
@classmethod
def get_remote_cluster(cls, apiclient, zoneid):
storpool_clusterid = subprocess.check_output(['storpool_confshow', 'CLUSTER_ID']).decode()
clusterid = storpool_clusterid.split("=")
logging.debug(storpool_clusterid)
clusters = list_clusters(apiclient, zoneid = zoneid)
for c in clusters:
configuration = list_configurations(
apiclient,
clusterid = c.id
)
for conf in configuration:
if conf.name == 'sp.cluster.id' and (conf.value not in clusterid[1]):
return c
@classmethod
def get_snapshot_template_id(self, apiclient, snapshot, storage_pool_id):
try:
cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
cmd.snapshotid = snapshot.id
snapshot_details = apiclient.getVolumeSnapshotDetails(cmd)
logging.debug("Snapshot details %s" % snapshot_details)
logging.debug("Snapshot with uuid %s" % snapshot.id)
for s in snapshot_details:
if s["snapshotDetailsName"] == storage_pool_id:
return s["snapshotDetailsValue"]
except Exception as err:
raise Exception(err)
return None
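# A hedged sketch of using the lookup above: the snapshot details map each
# primary storage id to the value the StorPool plugin stored for it (the
# snapshot's on-storage name):
#   sp_snap_name = StorPoolHelper.get_snapshot_template_id(
#       apiclient, snapshot, primary_storage.id)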
@classmethod
def getDestinationHost(self, hostsToavoid, hosts):
destinationHost = None
for host in hosts:
if host.id not in hostsToavoid:
destinationHost = host
break
return destinationHost
@classmethod
def getDestinationPool(self,
poolsToavoid,
migrateto,
pools
):
""" Get destination pool which has scope same as migrateto
and which is not in avoid set
"""
destinationPool = None
# Get Storage Pool Id to migrate to
for storagePool in pools:
if storagePool.scope == migrateto:
if storagePool.name not in poolsToavoid:
destinationPool = storagePool
break
return destinationPool
@classmethod
def get_destination_pools_hosts(self, apiclient, vm, hosts):
vol_list = list_volumes(
apiclient,
virtualmachineid=vm.id,
listall=True)
# Get destination host
destinationHost = self.getDestinationHost(vm.hostid, hosts)
return destinationHost, vol_list
@classmethod
def list_hosts_by_cluster_id(cls, apiclient, clusterid):
"""List all Hosts matching criteria"""
cmd = listHosts.listHostsCmd()
cmd.clusterid = clusterid
return(apiclient.listHosts(cmd))
@classmethod
def set_securityGroups(cls, apiclient, account, domainid, id):
cmd = authorizeSecurityGroupIngress.authorizeSecurityGroupIngressCmd()
cmd.protocol = 'TCP'
cmd.startport = 22
cmd.endport = 22
cmd.cidrlist = '0.0.0.0/0'
cmd.securitygroupid = id
cmd.account = account
cmd.domainid = domainid
apiclient.authorizeSecurityGroupIngress(cmd)
cmd.protocol = 'ICMP'
cmd.icmptype = "-1"
cmd.icmpcode = "-1"
# Authorize to only account not CIDR
cmd.securitygroupid = id
cmd.account = account
cmd.domainid = domainid
apiclient.authorizeSecurityGroupIngress(cmd)
@classmethod
def migrateVm(self, apiclient, vm, destinationHost):
"""
This method is to migrate a VM using migrate virtual machine API
"""
vm.migrate(
apiclient,
hostid=destinationHost.id,
)
vm.getState(
apiclient,
"Running"
)
# check for the VM's host and volume's storage post migration
migrated_vm_response = list_virtual_machines(apiclient, id=vm.id)
assert isinstance(migrated_vm_response, list), "Check list virtual machines response for valid list"
assert migrated_vm_response[0].hostid == destinationHost.id, "VM did not migrate to a specified host"
return migrated_vm_response[0]
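# A hedged usage sketch: pick a destination host with getDestinationHost() and
# live-migrate the VM to it:
#   dest = StorPoolHelper.getDestinationHost(vm.hostid, hosts)
#   StorPoolHelper.migrateVm(apiclient, vm, dest)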
@classmethod
def migrateVmWithVolumes(self, apiclient, vm, destinationHost, volumes, pool):
"""
This method is used to migrate a vm and its volumes using migrate virtual machine with volume API
INPUTS:
1. vm -> virtual machine object
2. destinationHost -> the host to which VM will be migrated
3. volumes -> list of volumes which are to be migrated
4. pool -> the destination pool for all migrated volumes
"""
vol_pool_map = {vol.id: pool.id for vol in volumes}
cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd()
cmd.hostid = destinationHost.id
cmd.migrateto = []
cmd.virtualmachineid = vm.id
for volume, pool1 in vol_pool_map.items():
cmd.migrateto.append({
'volume': volume,
'pool': pool1
})
apiclient.migrateVirtualMachineWithVolume(cmd)
vm.getState(
apiclient,
"Running"
)
# check for the VM's host and volume's storage post migration
migrated_vm_response = list_virtual_machines(apiclient, id=vm.id)
assert isinstance(migrated_vm_response, list), "Check list virtual machines response for valid list"
assert migrated_vm_response[0].hostid == destinationHost.id, "VM did not migrate to a specified host"
for vol in volumes:
migrated_volume_response = list_volumes(
apiclient,
virtualmachineid=migrated_vm_response[0].id,
name=vol.name,
listall=True)
assert isinstance(migrated_volume_response, list), "Check list virtual machines response for valid list"
assert migrated_volume_response[0].storageid == pool.id, "Volume did not migrate to a specified pool"
assert str(migrated_volume_response[0].state).lower() == 'ready', "Check migrated volume is in Ready state"
return migrated_vm_response[0]
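# The migrateto payload built above is a list of volume-to-pool mappings, one
# entry per volume (UUIDs below are hypothetical):
#   cmd.migrateto = [{'volume': '<volume uuid>', 'pool': '<pool uuid>'}]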
@classmethod
def create_sp_template_and_storage_pool(self, apiclient, template_name, primary_storage, zoneid):
spapiRemote = spapi.Api.fromConfig()
logging.debug("================ %s" % spapiRemote)
sp_api = spapi.Api.fromConfig(multiCluster= True)
logging.debug("================ %s" % sp_api)
remote_cluster = self.get_remote_storpool_cluster()
logging.debug("================ %s" % remote_cluster)
newTemplate = sptypes.VolumeTemplateCreateDesc(name = template_name, placeAll = "ssd", placeTail = "ssd", placeHead = "ssd", replication=1)
template_on_remote = spapiRemote.volumeTemplateCreate(newTemplate, clusterName = remote_cluster)
template_on_local = spapiRemote.volumeTemplateCreate(newTemplate)
storage_pool = StoragePool.create(apiclient, primary_storage, zoneid=zoneid)
return storage_pool, spapiRemote, sp_api
@classmethod
def destroy_vm(self, apiclient, virtualmachineid):
cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
cmd.id = virtualmachineid
cmd.expunge = True
apiclient.destroyVirtualMachine(cmd)
@classmethod
def check_storpool_volume_size(cls, volume, spapi):
name = volume.path.split("/")[3]
try:
spvolume = spapi.volumeList(volumeName = "~" + name)
if spvolume[0].size != volume.size:
raise Exception("Storpool volume size is not the same as CloudStack db size")
except spapi.ApiError as err:
raise Exception(err)
@classmethod
def check_storpool_volume_iops(cls, spapi, volume,):
name = volume.path.split("/")[3]
try:
spvolume = spapi.volumeList(volumeName = "~" + name)
logging.debug(spvolume[0].iops)
logging.debug(volume.maxiops)
if spvolume[0].iops != volume.maxiops:
raise Exception("Storpool volume size is not the same as CloudStack db size")
except spapi.ApiError as err:
raise Exception(err)
@classmethod
def create_custom_disk(cls, apiclient, services, size=None, miniops=None, maxiops=None, diskofferingid=None, zoneid=None, account=None, domainid=None, snapshotid=None):
"""Create Volume from Custom disk offering"""
cmd = createVolume.createVolumeCmd()
cmd.name = services["diskname"]
if diskofferingid:
cmd.diskofferingid = diskofferingid
if size:
cmd.size = size
if miniops:
cmd.miniops = miniops
if maxiops:
cmd.maxiops = maxiops
if account:
cmd.account = account
if domainid:
cmd.domainid = domainid
if snapshotid:
cmd.snapshotid = snapshotid
cmd.zoneid = zoneid
return Volume(apiclient.createVolume(cmd).__dict__)
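# A hedged usage sketch for create_custom_disk() (offering/zone objects are
# assumed from the calling test):
#   vol = StorPoolHelper.create_custom_disk(
#       apiclient, {"diskname": "test-volume-1"}, size=5,
#       diskofferingid=disk_offering.id, zoneid=zone.id,
#       account=account.name, domainid=account.domainid)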
@classmethod
def create_vm_custom(cls, apiclient, services, templateid=None, zoneid=None,
serviceofferingid=None, method='GET', hypervisor=None,
cpuNumber=None, cpuSpeed=None, memory=None, minIops=None,
maxIops=None, hostid=None, rootdisksize=None, account=None, domainid=None
):
"""Create the instance"""
cmd = deployVirtualMachine.deployVirtualMachineCmd()
if serviceofferingid:
cmd.serviceofferingid = serviceofferingid
elif "serviceoffering" in services:
cmd.serviceofferingid = services["serviceoffering"]
if zoneid:
cmd.zoneid = zoneid
elif "zoneid" in services:
cmd.zoneid = services["zoneid"]
if hypervisor:
cmd.hypervisor = hypervisor
if hostid:
cmd.hostid = hostid
if "displayname" in services:
cmd.displayname = services["displayname"]
if "name" in services:
cmd.name = services["name"]
if templateid:
cmd.templateid = templateid
elif "template" in services:
cmd.templateid = services["template"]
cmd.details = [{}]
if cpuNumber:
cmd.details[0]["cpuNumber"] = cpuNumber
if cpuSpeed:
cmd.details[0]["cpuSpeed"] = cpuSpeed
if memory:
cmd.details[0]["memory"] = memory
if minIops:
cmd.details[0]["minIops"] = minIops
if maxIops:
cmd.details[0]["maxIops"] = maxIops
if rootdisksize is not None and rootdisksize >= 0:
cmd.details[0]["rootdisksize"] = rootdisksize
if account:
cmd.account = account
if domainid:
cmd.domainid = domainid
virtual_machine = apiclient.deployVirtualMachine(cmd, method=method)
return VirtualMachine(virtual_machine.__dict__, services)
@classmethod
def resize_volume(cls, apiclient, volume, shrinkOk=None, disk_offering=None, size=None, maxiops=None, miniops=None):
cmd = resizeVolume.resizeVolumeCmd()
cmd.id = volume.id
if disk_offering:
cmd.diskofferingid = disk_offering.id
if size:
cmd.size = size
if maxiops:
cmd.maxiops = maxiops
if miniops:
cmd.miniops = miniops
cmd.shrinkok = shrinkOk
apiclient.resizeVolume(cmd)
new_size = Volume.list(
apiclient,
id=volume.id
)
return new_size[0]
@classmethod
def create_account(cls, apiclient, services, accounttype=None, domainid=None, roleid=None):
"""Creates an account"""
cmd = createAccount.createAccountCmd()
# 0 - User, 1 - Root Admin, 2 - Domain Admin
if accounttype:
cmd.accounttype = accounttype
else:
cmd.accounttype = 1
cmd.email = services["email"]
cmd.firstname = services["firstname"]
cmd.lastname = services["lastname"]
cmd.password = services["password"]
username = services["username"]
# Limit account username to 99 chars to avoid failure
# 6 chars start string + 85 chars apiclientid + 6 chars random string + 2 chars joining hyphen string = 99
username = username[:6]
apiclientid = apiclient.id[-85:] if len(apiclient.id) > 85 else apiclient.id
cmd.username = "-".join([username,
random_gen(id=apiclientid, size=6)])
if "accountUUID" in services:
cmd.accountid = "-".join([services["accountUUID"], random_gen()])
if "userUUID" in services:
cmd.userid = "-".join([services["userUUID"], random_gen()])
if domainid:
cmd.domainid = domainid
if roleid:
cmd.roleid = roleid
account = apiclient.createAccount(cmd)
return Account(account.__dict__)
@classmethod
def start(cls, apiclient, vmid, hostid):
"""Start the instance"""
cmd = startVirtualMachine.startVirtualMachineCmd()
cmd.id = vmid
cmd.hostid = hostid
return (apiclient.startVirtualMachine(cmd))
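# A hedged usage sketch: start a VM pinned to a specific host (ids come from
# the calling test):
#   StorPoolHelper.start(apiclient, vm.id, host.id)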