New feature: Import/Unmanage DATA volume from storage pool (#8808)

Wei Zhou 2024-04-23 16:05:59 +02:00 committed by GitHub
parent 80adf5ead1
commit 0b857def68
32 changed files with 4439 additions and 9 deletions
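The commit introduces three admin-only APIs: listVolumesForImport (discover unmanaged volumes on a primary storage pool), importVolume (register a discovered volume as a CloudStack DATA volume), and unmanageVolume (release a volume from CloudStack without deleting it on the backend storage). The sketch below is not part of the commit; it condenses the commit's own unit tests into a single end-to-end flow, using a Mockito-mocked VolumeImportUnmanageService and reflection-based field injection purely for illustration (the class name, IDs, and volume path are made up). In a running management server the API layer populates the command objects from request parameters and Spring injects the service implementation.

import org.apache.cloudstack.api.command.admin.volume.ImportVolumeCmd;
import org.apache.cloudstack.api.command.admin.volume.ListVolumesForImportCmd;
import org.apache.cloudstack.api.command.admin.volume.UnmanageVolumeCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.storage.volume.VolumeImportUnmanageService;
import org.mockito.Mockito;
import org.springframework.test.util.ReflectionTestUtils;

public class VolumeImportFlowSketch {
    public static void main(String[] args) throws Exception {
        // Stand-in for the Spring-injected service implementation.
        VolumeImportUnmanageService service = Mockito.mock(VolumeImportUnmanageService.class);
        Mockito.when(service.listVolumesForImport(Mockito.any())).thenReturn(new ListResponse<>());
        Mockito.when(service.importVolume(Mockito.any())).thenReturn(Mockito.mock(VolumeResponse.class));
        Mockito.when(service.unmanageVolume(Mockito.anyLong())).thenReturn(true);

        // 1. listVolumesForImport: discover unmanaged volumes on a primary storage pool.
        ListVolumesForImportCmd listCmd = new ListVolumesForImportCmd();
        ReflectionTestUtils.setField(listCmd, "storageId", 1L);
        ReflectionTestUtils.setField(listCmd, "volumeImportService", service);
        listCmd.execute();

        // 2. importVolume: register one of the discovered volumes as a DATA volume.
        ImportVolumeCmd importCmd = new ImportVolumeCmd();
        ReflectionTestUtils.setField(importCmd, "storageId", 1L);
        ReflectionTestUtils.setField(importCmd, "path", "example-volume-path");
        ReflectionTestUtils.setField(importCmd, "volumeImportService", service);
        importCmd.execute();

        // 3. unmanageVolume: release the volume from CloudStack without deleting it on storage.
        UnmanageVolumeCmd unmanageCmd = new UnmanageVolumeCmd();
        ReflectionTestUtils.setField(unmanageCmd, "volumeId", 10L);
        ReflectionTestUtils.setField(unmanageCmd, "volumeImportService", service);
        unmanageCmd.execute();
    }
}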

View File

@ -316,6 +316,8 @@ public class EventTypes {
public static final String EVENT_VOLUME_UPDATE = "VOLUME.UPDATE";
public static final String EVENT_VOLUME_DESTROY = "VOLUME.DESTROY";
public static final String EVENT_VOLUME_RECOVER = "VOLUME.RECOVER";
public static final String EVENT_VOLUME_IMPORT = "VOLUME.IMPORT";
public static final String EVENT_VOLUME_UNMANAGE = "VOLUME.UNMANAGE";
public static final String EVENT_VOLUME_CHANGE_DISK_OFFERING = "VOLUME.CHANGE.DISK.OFFERING";
// Domains

View File

@ -117,6 +117,7 @@ public class ApiConstants {
public static final String CURRENT_START_IP = "currentstartip";
public static final String CURRENT_END_IP = "currentendip";
public static final String ENCRYPT = "encrypt";
public static final String ENCRYPT_FORMAT = "encryptformat";
public static final String ENCRYPT_ROOT = "encryptroot";
public static final String ENCRYPTION_SUPPORTED = "encryptionsupported";
public static final String MIN_IOPS = "miniops";
@ -191,6 +192,7 @@ public class ApiConstants {
public static final String FORMAT = "format";
public static final String FOR_VIRTUAL_NETWORK = "forvirtualnetwork";
public static final String FOR_SYSTEM_VMS = "forsystemvms";
public static final String FULL_PATH = "fullpath";
public static final String GATEWAY = "gateway";
public static final String IP6_GATEWAY = "ip6gateway";
public static final String GROUP = "group";
@ -550,6 +552,7 @@ public class ApiConstants {
public static final String ALLOCATION_STATE = "allocationstate";
public static final String MANAGED_STATE = "managedstate";
public static final String MANAGEMENT_SERVER_ID = "managementserverid";
public static final String STORAGE = "storage";
public static final String STORAGE_ID = "storageid";
public static final String PING_STORAGE_SERVER_IP = "pingstorageserverip";
public static final String PING_DIR = "pingdir";

View File

@ -0,0 +1,165 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.volume;
import com.cloud.event.EventTypes;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.DiskOfferingResponse;
import org.apache.cloudstack.api.response.DomainResponse;
import org.apache.cloudstack.api.response.ProjectResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.storage.volume.VolumeImportUnmanageService;
import javax.inject.Inject;
@APICommand(name = "importVolume",
description = "Import an unmanaged volume from a storage pool on a host into CloudStack",
responseObject = VolumeResponse.class,
responseView = ResponseObject.ResponseView.Full,
requestHasSensitiveInfo = false,
responseHasSensitiveInfo = true,
authorized = {RoleType.Admin},
since = "4.19.1")
public class ImportVolumeCmd extends BaseAsyncCmd {
@Inject
public VolumeImportUnmanageService volumeImportService;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.PATH,
type = BaseCmd.CommandType.STRING,
required = true,
description = "the path of the volume")
private String path;
@Parameter(name = ApiConstants.NAME,
type = BaseCmd.CommandType.STRING,
description = "the name of the volume. If not set, it will be set to the path of the volume.")
private String name;
@Parameter(name = ApiConstants.STORAGE_ID,
type = BaseCmd.CommandType.UUID,
required = true,
entityType = StoragePoolResponse.class,
description = "the ID of the storage pool")
private Long storageId;
@Parameter(name = ApiConstants.DISK_OFFERING_ID,
type = BaseCmd.CommandType.UUID,
entityType = DiskOfferingResponse.class,
description = "the ID of the disk offering linked to the volume")
private Long diskOfferingId;
@Parameter(name = ApiConstants.ACCOUNT,
type = BaseCmd.CommandType.STRING,
description = "an optional account for the volume. Must be used with domainId.")
private String accountName;
@Parameter(name = ApiConstants.DOMAIN_ID,
type = BaseCmd.CommandType.UUID,
entityType = DomainResponse.class,
description = "import volume to the domain specified")
private Long domainId;
@Parameter(name = ApiConstants.PROJECT_ID,
type = BaseCmd.CommandType.UUID,
entityType = ProjectResponse.class,
description = "import volume for the project")
private Long projectId;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getPath() {
return path;
}
public String getName() {
return name;
}
public Long getStorageId() {
return storageId;
}
public Long getDiskOfferingId() {
return diskOfferingId;
}
public String getAccountName() {
return accountName;
}
public Long getDomainId() {
return domainId;
}
public Long getProjectId() {
return projectId;
}
@Override
public String getEventType() {
return EventTypes.EVENT_VOLUME_IMPORT;
}
@Override
public String getEventDescription() {
return String.format("Importing unmanaged Volume with path: %s", path);
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
VolumeResponse response = volumeImportService.importVolume(this);
response.setResponseName(getCommandName());
setResponseObject(response);
}
@Override
public long getEntityOwnerId() {
Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true);
if (accountId == null) {
return CallContext.current().getCallingAccount().getId();
}
return accountId;
}
}

View File

@ -0,0 +1,93 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.volume;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.VolumeForImportResponse;
import org.apache.cloudstack.storage.volume.VolumeImportUnmanageService;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
import javax.inject.Inject;
@APICommand(name = "listVolumesForImport",
description = "Lists unmanaged volumes on a storage pool",
responseObject = VolumeForImportResponse.class,
responseView = ResponseObject.ResponseView.Full,
entityType = {VolumeOnStorageTO.class},
requestHasSensitiveInfo = false,
responseHasSensitiveInfo = true,
authorized = {RoleType.Admin},
since = "4.19.1")
public class ListVolumesForImportCmd extends BaseListCmd {
@Inject
public VolumeImportUnmanageService volumeImportService;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.STORAGE_ID,
type = BaseCmd.CommandType.UUID,
required = true,
entityType = StoragePoolResponse.class,
description = "the ID of the storage pool")
private Long storageId;
@Parameter(name = ApiConstants.PATH,
type = BaseCmd.CommandType.STRING,
description = "the path of the volume on the storage pool")
private String path;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public Long getStorageId() {
return storageId;
}
public String getPath() {
return path;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
ListResponse<VolumeForImportResponse> response = volumeImportService.listVolumesForImport(this);
response.setResponseName(getCommandName());
setResponseObject(response);
}
}

View File

@ -0,0 +1,127 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.api.command.admin.volume;
import com.cloud.event.EventTypes;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.storage.Volume;
import com.cloud.user.Account;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.storage.volume.VolumeImportUnmanageService;
import javax.inject.Inject;
@APICommand(name = "unmanageVolume",
description = "Unmanage a volume on storage pool.",
entityType = {Volume.class},
responseObject = SuccessResponse.class,
requestHasSensitiveInfo = false,
authorized = {RoleType.Admin},
since = "4.19.1")
public class UnmanageVolumeCmd extends BaseAsyncCmd {
@Inject
public VolumeImportUnmanageService volumeImportService;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.ID,
type = CommandType.UUID,
entityType = VolumeResponse.class,
required = true,
description = "The ID of the volume to unmanage")
private Long volumeId;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public Long getVolumeId() {
return volumeId;
}
@Override
public String getEventType() {
return EventTypes.EVENT_VOLUME_UNMANAGE;
}
@Override
public String getEventDescription() {
return String.format("Unmanaging Volume with ID %s", volumeId);
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException,
ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
try {
boolean result = volumeImportService.unmanageVolume(volumeId);
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
setResponseObject(response);
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to unmanage the volume");
}
} catch (Exception e) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getLocalizedMessage());
}
}
@Override
public long getEntityOwnerId() {
Volume volume = _responseGenerator.findVolumeById(volumeId);
if (volume != null) {
return volume.getAccountId();
}
return Account.ACCOUNT_ID_SYSTEM;
}
@Override
public ApiCommandResourceType getApiResourceType() {
return ApiCommandResourceType.Volume;
}
@Override
public Long getApiResourceId() {
return volumeId;
}
}

View File

@ -0,0 +1,176 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
import java.util.Map;
@EntityReference(value = VolumeOnStorageTO.class)
public class VolumeForImportResponse extends BaseResponse {
@SerializedName(ApiConstants.NAME)
@Param(description = "the name of the volume")
private String name;
@SerializedName(ApiConstants.PATH)
@Param(description = "the path of the volume")
private String path;
@SerializedName(ApiConstants.FULL_PATH)
@Param(description = "the full path of the volume")
private String fullPath;
@SerializedName(ApiConstants.FORMAT)
@Param(description = "the format of the volume")
private String format;
@SerializedName(ApiConstants.SIZE)
@Param(description = "the size of the volume")
private long size;
@SerializedName(ApiConstants.VIRTUAL_SIZE)
@Param(description = "the virtual size of the volume")
private long virtualSize;
@SerializedName(ApiConstants.ENCRYPT_FORMAT)
@Param(description = "the encrypt format of the volume")
private String qemuEncryptFormat;
@SerializedName(ApiConstants.STORAGE_ID)
@Param(description = "id of the primary storage hosting the volume")
private String storagePoolId;
@SerializedName(ApiConstants.STORAGE)
@Param(description = "name of the primary storage hosting the volume")
private String storagePoolName;
@SerializedName(ApiConstants.STORAGE_TYPE)
@Param(description = "type of the primary storage hosting the volume")
private String storagePoolType;
@SerializedName(ApiConstants.DETAILS)
@Param(description = "volume details in key/value pairs.")
private Map details;
@SerializedName(ApiConstants.CHAIN_INFO)
@Param(description = "the chain info of the volume")
String chainInfo;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getPath() {
return path;
}
public void setPath(String path) {
this.path = path;
}
public String getFullPath() {
return fullPath;
}
public void setFullPath(String fullPath) {
this.fullPath = fullPath;
}
public String getFormat() {
return format;
}
public void setFormat(String format) {
this.format = format;
}
public long getSize() {
return size;
}
public void setSize(long size) {
this.size = size;
}
public long getVirtualSize() {
return virtualSize;
}
public void setVirtualSize(long virtualSize) {
this.virtualSize = virtualSize;
}
public String getQemuEncryptFormat() {
return qemuEncryptFormat;
}
public void setQemuEncryptFormat(String qemuEncryptFormat) {
this.qemuEncryptFormat = qemuEncryptFormat;
}
public String getStoragePoolId() {
return storagePoolId;
}
public void setStoragePoolId(String storagePoolId) {
this.storagePoolId = storagePoolId;
}
public String getStoragePoolName() {
return storagePoolName;
}
public void setStoragePoolName(String storagePoolName) {
this.storagePoolName = storagePoolName;
}
public String getStoragePoolType() {
return storagePoolType;
}
public void setStoragePoolType(String storagePoolType) {
this.storagePoolType = storagePoolType;
}
public Map getDetails() {
return details;
}
public void setDetails(Map details) {
this.details = details;
}
public String getChainInfo() {
return chainInfo;
}
public void setChainInfo(String chainInfo) {
this.chainInfo = chainInfo;
}
}

View File

@ -0,0 +1,46 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.volume;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.Storage;
import com.cloud.utils.component.PluggableService;
import org.apache.cloudstack.api.command.admin.volume.ListVolumesForImportCmd;
import org.apache.cloudstack.api.command.admin.volume.ImportVolumeCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.VolumeForImportResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import java.util.Arrays;
import java.util.List;
public interface VolumeImportUnmanageService extends PluggableService {
List<Hypervisor.HypervisorType> SUPPORTED_HYPERVISORS =
Arrays.asList(Hypervisor.HypervisorType.KVM, Hypervisor.HypervisorType.VMware);
List<Storage.StoragePoolType> SUPPORTED_STORAGE_POOL_TYPES_FOR_KVM = Arrays.asList(Storage.StoragePoolType.NetworkFilesystem,
Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.RBD);
ListResponse<VolumeForImportResponse> listVolumesForImport(ListVolumesForImportCmd cmd);
VolumeResponse importVolume(ImportVolumeCmd cmd);
boolean unmanageVolume(long volumeId);
}
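
The two constant lists above define the feature's scope: only KVM and VMware hosts are eligible, and on KVM only NetworkFilesystem, Filesystem, and RBD pools. Below is a hedged sketch of how an implementation might enforce those limits before sending any command to a host; the helper class, method name, and messages are illustrative assumptions rather than code from this commit.

import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.StoragePool;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.storage.volume.VolumeImportUnmanageService;

public class VolumeImportPoolValidator {
    // Rejects pools the feature does not cover, based on the constants declared in VolumeImportUnmanageService.
    public static void validatePoolForImport(StoragePool pool) {
        Hypervisor.HypervisorType hypervisor = pool.getHypervisor();
        if (!VolumeImportUnmanageService.SUPPORTED_HYPERVISORS.contains(hypervisor)) {
            throw new CloudRuntimeException(String.format("Volume import/unmanage is not supported for hypervisor %s", hypervisor));
        }
        if (Hypervisor.HypervisorType.KVM.equals(hypervisor)
                && !VolumeImportUnmanageService.SUPPORTED_STORAGE_POOL_TYPES_FOR_KVM.contains(pool.getPoolType())) {
            throw new CloudRuntimeException(String.format("Storage pool type %s is not supported for volume import on KVM", pool.getPoolType()));
        }
    }
}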

View File

@ -0,0 +1,130 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.volume;
import com.cloud.hypervisor.Hypervisor;
import java.util.HashMap;
import java.util.Map;
public class VolumeOnStorageTO {
Hypervisor.HypervisorType hypervisorType;
private String path;
private String fullPath;
private String name;
private String format;
private long size;
private long virtualSize;
private String qemuEncryptFormat;
private Map<Detail, String> details = new HashMap<>();
public enum Detail {
BACKING_FILE, BACKING_FILE_FORMAT, CLUSTER_SIZE, FILE_FORMAT, IS_LOCKED, IS_ENCRYPTED
}
public VolumeOnStorageTO() {
}
public VolumeOnStorageTO(Hypervisor.HypervisorType hypervisorType, String path, String name, String fullPath, String format, long size, long virtualSize) {
this.hypervisorType = hypervisorType;
this.path = path;
this.name = name;
this.fullPath = fullPath;
this.format = format;
this.size = size;
this.virtualSize = virtualSize;
}
public Hypervisor.HypervisorType getHypervisorType() {
return hypervisorType;
}
public void setHypervisorType(Hypervisor.HypervisorType hypervisorType) {
this.hypervisorType = hypervisorType;
}
public String getPath() {
return path;
}
public void setPath(String path) {
this.path = path;
}
public String getFullPath() {
return fullPath;
}
public void setFullPath(String fullPath) {
this.fullPath = fullPath;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getFormat() {
return format;
}
public void setFormat(String format) {
this.format = format;
}
public long getSize() {
return size;
}
public void setSize(long size) {
this.size = size;
}
public long getVirtualSize() {
return virtualSize;
}
public void setVirtualSize(long virtualSize) {
this.virtualSize = virtualSize;
}
public String getQemuEncryptFormat() {
return qemuEncryptFormat;
}
public void setQemuEncryptFormat(String qemuEncryptFormat) {
this.qemuEncryptFormat = qemuEncryptFormat;
}
public Map<Detail, String> getDetails() {
return details;
}
public void setDetails(Map<Detail, String> details) {
this.details = details;
}
public void addDetail(Detail detail, String value) {
details.put(detail, value);
}
}

View File

@ -0,0 +1,83 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.volume;
import com.cloud.event.EventTypes;
import com.cloud.user.AccountService;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.storage.volume.VolumeImportUnmanageService;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.test.util.ReflectionTestUtils;
@RunWith(MockitoJUnitRunner.class)
public class ImportVolumeCmdTest {
VolumeImportUnmanageService volumeImportService = Mockito.spy(VolumeImportUnmanageService.class);
AccountService accountService = Mockito.spy(AccountService.class);
@Test
public void testImportVolumeCmd() {
String name = "volume name";
String path = "file path";
Long storageId = 2L;
Long diskOfferingId = 3L;
String accountName = "account";
Long domainId = 4L;
Long projectId = 5L;
long accountId = 6L;
Mockito.when(accountService.finalyzeAccountId(accountName, domainId, projectId, true)).thenReturn(accountId);
ImportVolumeCmd cmd = new ImportVolumeCmd();
ReflectionTestUtils.setField(cmd, "path", path);
ReflectionTestUtils.setField(cmd, "name", name);
ReflectionTestUtils.setField(cmd, "storageId", storageId);
ReflectionTestUtils.setField(cmd, "diskOfferingId", diskOfferingId);
ReflectionTestUtils.setField(cmd, "accountName", accountName);
ReflectionTestUtils.setField(cmd, "domainId", domainId);
ReflectionTestUtils.setField(cmd, "projectId", projectId);
ReflectionTestUtils.setField(cmd,"volumeImportService", volumeImportService);
ReflectionTestUtils.setField(cmd, "_accountService", accountService);
Assert.assertEquals(path, cmd.getPath());
Assert.assertEquals(name, cmd.getName());
Assert.assertEquals(storageId, cmd.getStorageId());
Assert.assertEquals(diskOfferingId, cmd.getDiskOfferingId());
Assert.assertEquals(accountName, cmd.getAccountName());
Assert.assertEquals(domainId, cmd.getDomainId());
Assert.assertEquals(projectId, cmd.getProjectId());
Assert.assertEquals(EventTypes.EVENT_VOLUME_IMPORT, cmd.getEventType());
Assert.assertEquals("Importing unmanaged Volume with path: " + path, cmd.getEventDescription());
Assert.assertEquals(accountId, cmd.getEntityOwnerId());
VolumeResponse response = Mockito.mock(VolumeResponse.class);
Mockito.when(volumeImportService.importVolume(cmd)).thenReturn(response);
try {
cmd.execute();
} catch (Exception ignored) {
}
Assert.assertEquals(response, cmd.getResponseObject());
}
}

View File

@ -0,0 +1,58 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.volume;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.VolumeForImportResponse;
import org.apache.cloudstack.storage.volume.VolumeImportUnmanageService;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.test.util.ReflectionTestUtils;
@RunWith(MockitoJUnitRunner.class)
public class ListVolumesForImportCmdTest {
VolumeImportUnmanageService volumeImportService = Mockito.spy(VolumeImportUnmanageService.class);
@Test
public void testListVolumesForImportCmd() {
Long storageId = 2L;
String filePath = "file path";
ListVolumesForImportCmd cmd = new ListVolumesForImportCmd();
ReflectionTestUtils.setField(cmd, "storageId", storageId);
ReflectionTestUtils.setField(cmd, "path", filePath);
ReflectionTestUtils.setField(cmd,"volumeImportService", volumeImportService);
Assert.assertEquals(storageId, cmd.getStorageId());
Assert.assertEquals(filePath, cmd.getPath());
ListResponse<VolumeForImportResponse> response = Mockito.mock(ListResponse.class);
Mockito.when(volumeImportService.listVolumesForImport(cmd)).thenReturn(response);
try {
cmd.execute();
} catch (Exception ignored) {
}
Assert.assertEquals(response, cmd.getResponseObject());
}
}

View File

@ -0,0 +1,70 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.volume;
import com.cloud.event.EventTypes;
import com.cloud.storage.Volume;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.ResponseGenerator;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.storage.volume.VolumeImportUnmanageService;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.test.util.ReflectionTestUtils;
@RunWith(MockitoJUnitRunner.class)
public class UnmanageVolumeCmdTest {
VolumeImportUnmanageService volumeImportService = Mockito.spy(VolumeImportUnmanageService.class);
ResponseGenerator responseGenerator = Mockito.spy(ResponseGenerator.class);
@Test
public void testUnmanageVolumeCmd() {
long accountId = 2L;
Long volumeId = 3L;
Volume volume = Mockito.mock(Volume.class);
Mockito.when(responseGenerator.findVolumeById(volumeId)).thenReturn(volume);
Mockito.when(volume.getAccountId()).thenReturn(accountId);
UnmanageVolumeCmd cmd = new UnmanageVolumeCmd();
ReflectionTestUtils.setField(cmd, "volumeId", volumeId);
ReflectionTestUtils.setField(cmd,"volumeImportService", volumeImportService);
ReflectionTestUtils.setField(cmd,"_responseGenerator", responseGenerator);
Assert.assertEquals(volumeId, cmd.getVolumeId());
Assert.assertEquals(accountId, cmd.getEntityOwnerId());
Assert.assertEquals(volumeId, cmd.getApiResourceId());
Assert.assertEquals(ApiCommandResourceType.Volume, cmd.getApiResourceType());
Assert.assertEquals(EventTypes.EVENT_VOLUME_UNMANAGE, cmd.getEventType());
Assert.assertEquals("Unmanaging Volume with ID " + volumeId, cmd.getEventDescription());
Mockito.when(volumeImportService.unmanageVolume(volumeId)).thenReturn(true);
try {
cmd.execute();
} catch (Exception ignored) {
}
Object response = cmd.getResponseObject();
Assert.assertTrue(response instanceof SuccessResponse);
}
}

View File

@ -0,0 +1,77 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.Storage;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.HashMap;
import java.util.Map;
@RunWith(MockitoJUnitRunner.class)
public final class VolumeForImportResponseTest {
private static String path = "path";
private static String name = "name";
private static String fullPath = "fullPath";
private static String format = "qcow2";
private static long size = 10;
private static long virtualSize = 20;
private static String encryptFormat = "LUKS";
private static Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM;
private static String storagePoolId = "storage pool uuid";
private static String storagePoolName = "storage pool 1";
private static String storagePoolType = Storage.StoragePoolType.NetworkFilesystem.name();
private static String chainInfo = "chain info";
@Test
public void testVolumeForImportResponse() {
final VolumeForImportResponse response = new VolumeForImportResponse();
response.setPath(path);
response.setName(name);
response.setFullPath(fullPath);
response.setFormat(format);
response.setSize(size);
response.setVirtualSize(virtualSize);
response.setQemuEncryptFormat(encryptFormat);
response.setStoragePoolType(storagePoolType);
response.setStoragePoolName(storagePoolName);
response.setStoragePoolId(storagePoolId);
response.setChainInfo(chainInfo);
Map<String, String> details = new HashMap<>();
details.put("key", "value");
response.setDetails(details);
Assert.assertEquals(path, response.getPath());
Assert.assertEquals(name, response.getName());
Assert.assertEquals(fullPath, response.getFullPath());
Assert.assertEquals(format, response.getFormat());
Assert.assertEquals(size, response.getSize());
Assert.assertEquals(virtualSize, response.getVirtualSize());
Assert.assertEquals(encryptFormat, response.getQemuEncryptFormat());
Assert.assertEquals(storagePoolType, response.getStoragePoolType());
Assert.assertEquals(storagePoolName, response.getStoragePoolName());
Assert.assertEquals(storagePoolId, response.getStoragePoolId());
Assert.assertEquals(chainInfo, response.getChainInfo());
Assert.assertEquals(details, response.getDetails());
}
}

View File

@ -0,0 +1,84 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.volume;
import com.cloud.hypervisor.Hypervisor;
import org.junit.Assert;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
public class VolumeOnStorageTOTest {
private static String path = "path";
private static String name = "name";
private static String fullPath = "fullPath";
private static String format = "qcow2";
private static long size = 10;
private static long virtualSize = 20;
private static String encryptFormat = "LUKS";
private static Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM;
private static String BACKING_FILE = "backing file";
private static String BACKING_FILE_FORMAT = "qcow2";
@Test
public void testVolumeOnStorageTO() {
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(hypervisorType, path, name, fullPath,
format, size, virtualSize);
Assert.assertEquals(hypervisorType, volumeOnStorageTO.getHypervisorType());
Assert.assertEquals(path, volumeOnStorageTO.getPath());
Assert.assertEquals(name, volumeOnStorageTO.getName());
Assert.assertEquals(fullPath, volumeOnStorageTO.getFullPath());
Assert.assertEquals(format, volumeOnStorageTO.getFormat());
Assert.assertEquals(size, volumeOnStorageTO.getSize());
Assert.assertEquals(virtualSize, volumeOnStorageTO.getVirtualSize());
}
@Test
public void testVolumeOnStorageTO3() {
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO();
volumeOnStorageTO.setHypervisorType(hypervisorType);
volumeOnStorageTO.setPath(path);
volumeOnStorageTO.setFullPath(fullPath);
volumeOnStorageTO.setName(name);
volumeOnStorageTO.setFormat(format);
volumeOnStorageTO.setSize(size);
volumeOnStorageTO.setVirtualSize(virtualSize);
volumeOnStorageTO.setQemuEncryptFormat(encryptFormat);
Map<VolumeOnStorageTO.Detail, String> details = new HashMap<>();
details.put(VolumeOnStorageTO.Detail.BACKING_FILE, BACKING_FILE);
volumeOnStorageTO.setDetails(details);
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT, BACKING_FILE_FORMAT);
Assert.assertEquals(hypervisorType, volumeOnStorageTO.getHypervisorType());
Assert.assertEquals(path, volumeOnStorageTO.getPath());
Assert.assertEquals(name, volumeOnStorageTO.getName());
Assert.assertEquals(fullPath, volumeOnStorageTO.getFullPath());
Assert.assertEquals(format, volumeOnStorageTO.getFormat());
Assert.assertEquals(size, volumeOnStorageTO.getSize());
Assert.assertEquals(virtualSize, volumeOnStorageTO.getVirtualSize());
Assert.assertEquals(encryptFormat, volumeOnStorageTO.getQemuEncryptFormat());
details = volumeOnStorageTO.getDetails();
Assert.assertEquals(2, details.size());
Assert.assertEquals(BACKING_FILE, details.get(VolumeOnStorageTO.Detail.BACKING_FILE));
Assert.assertEquals(BACKING_FILE_FORMAT, details.get(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT));
}
}

View File

@ -0,0 +1,42 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.api;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
import java.util.List;
public class GetVolumesOnStorageAnswer extends Answer {
private List<VolumeOnStorageTO> volumes;
GetVolumesOnStorageAnswer() {
}
public GetVolumesOnStorageAnswer(GetVolumesOnStorageCommand cmd, List<VolumeOnStorageTO> volumes) {
super(cmd, true, null);
this.volumes = volumes;
}
public GetVolumesOnStorageAnswer(final GetVolumesOnStorageCommand cmd, final boolean success, final String details) {
super(cmd, success, details);
}
public List<VolumeOnStorageTO> getVolumes() {
return volumes;
}
}

View File

@ -0,0 +1,55 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import com.cloud.agent.api.to.StorageFilerTO;
public class GetVolumesOnStorageCommand extends Command {
StorageFilerTO pool;
private String volumePath; //search by file path
private String keyword; //filter by keyword
public GetVolumesOnStorageCommand() {
}
public GetVolumesOnStorageCommand(StorageFilerTO pool, String filePath, String keyword) {
this.pool = pool;
this.volumePath = filePath;
this.keyword = keyword;
}
public StorageFilerTO getPool() {
return pool;
}
public String getVolumePath() {
return volumePath;
}
public String getKeyword() {
return keyword;
}
@Override
public boolean executeInSequence() {
return false;
}
}
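
Not part of this commit: a sketch of how the management-server side might dispatch GetVolumesOnStorageCommand to a host agent and consume the matching answer. AgentManager#send is the existing CloudStack agent API; the wrapper class and its signature are illustrative assumptions.

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.GetVolumesOnStorageAnswer;
import com.cloud.agent.api.GetVolumesOnStorageCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.storage.StoragePool;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;

import java.util.Collections;
import java.util.List;

public class GetVolumesOnStorageSketch {
    // Sends the command to the host agent and returns the discovered volumes,
    // or an empty list when the agent reports a failure.
    public static List<VolumeOnStorageTO> listVolumesOnPool(AgentManager agentManager, long hostId,
            StoragePool pool, String volumePath, String keyword)
            throws AgentUnavailableException, OperationTimedoutException {
        GetVolumesOnStorageCommand cmd = new GetVolumesOnStorageCommand(new StorageFilerTO(pool), volumePath, keyword);
        Answer answer = agentManager.send(hostId, cmd);
        if (answer instanceof GetVolumesOnStorageAnswer && answer.getResult()) {
            return ((GetVolumesOnStorageAnswer) answer).getVolumes();
        }
        return Collections.emptyList();
    }
}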

View File

@ -0,0 +1,73 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.api;
import com.cloud.hypervisor.Hypervisor;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import java.util.ArrayList;
import java.util.List;
public class GetVolumesOnStorageAnswerTest {
private static String path = "path";
private static String name = "name";
private static String fullPath = "fullPath";
private static String format = "qcow2";
private static long size = 10;
private static long virtualSize = 20;
private static String encryptFormat = "LUKS";
private static GetVolumesOnStorageCommand command = Mockito.mock(GetVolumesOnStorageCommand.class);
@Test
public void testGetVolumesOnStorageAnswer() {
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(Hypervisor.HypervisorType.KVM, path, name, fullPath,
format, size, virtualSize);
volumeOnStorageTO.setQemuEncryptFormat(encryptFormat);
List<VolumeOnStorageTO> volumesOnStorageTO = new ArrayList<>();
volumesOnStorageTO.add(volumeOnStorageTO);
GetVolumesOnStorageAnswer answer = new GetVolumesOnStorageAnswer(command, volumesOnStorageTO);
List<VolumeOnStorageTO> volumes = answer.getVolumes();
Assert.assertEquals(1, volumes.size());
VolumeOnStorageTO volume = volumes.get(0);
Assert.assertEquals(Hypervisor.HypervisorType.KVM, volume.getHypervisorType());
Assert.assertEquals(path, volume.getPath());
Assert.assertEquals(name, volume.getName());
Assert.assertEquals(fullPath, volume.getFullPath());
Assert.assertEquals(format, volume.getFormat());
Assert.assertEquals(size, volume.getSize());
Assert.assertEquals(virtualSize, volume.getVirtualSize());
Assert.assertEquals(encryptFormat, volume.getQemuEncryptFormat());
Assert.assertEquals(path, volume.getPath());
}
@Test
public void testGetVolumesOnStorageAnswer2() {
String details = "details";
GetVolumesOnStorageAnswer answer = new GetVolumesOnStorageAnswer(command, false, details);
Assert.assertFalse(answer.getResult());
Assert.assertEquals(details, answer.getDetails());
}
}

View File

@ -0,0 +1,41 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.api;
import com.cloud.agent.api.to.StorageFilerTO;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
public class GetVolumesOnStorageCommandTest {
final StorageFilerTO pool = Mockito.mock(StorageFilerTO.class);
final String localPath = "localPath";
final String volumePath = "volumePath";
final String keyword = "keyword";
@Test
public void testGetVolumesOnStorageCommand() {
GetVolumesOnStorageCommand command = new GetVolumesOnStorageCommand(pool, volumePath, keyword);
Assert.assertEquals(pool, command.getPool());
Assert.assertEquals(volumePath, command.getVolumePath());
Assert.assertEquals(keyword, command.getKeyword());
Assert.assertFalse(command.executeInSequence());
}
}

View File

@ -167,7 +167,8 @@ public interface VolumeOrchestrationService {
* @param chainInfo chain info for the volume. Hypervisor specific.
* @return DiskProfile of imported volume
*/
-DiskProfile importVolume(Type type, String name, DiskOffering offering, Long sizeInBytes, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template,
+DiskProfile importVolume(Type type, String name, DiskOffering offering, Long sizeInBytes, Long minIops, Long maxIops,
+Long zoneId, HypervisorType hypervisorType, VirtualMachine vm, VirtualMachineTemplate template,
Account owner, Long deviceId, Long poolId, String path, String chainInfo);
DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachine vm, VirtualMachineTemplate template,

View File

@ -2191,7 +2191,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Override
public DiskProfile importVolume(Type type, String name, DiskOffering offering, Long sizeInBytes, Long minIops, Long maxIops,
-VirtualMachine vm, VirtualMachineTemplate template, Account owner,
+Long zoneId, HypervisorType hypervisorType, VirtualMachine vm, VirtualMachineTemplate template, Account owner,
Long deviceId, Long poolId, String path, String chainInfo) {
if (sizeInBytes == null) {
sizeInBytes = offering.getDiskSize();
@ -2200,9 +2200,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
minIops = minIops != null ? minIops : offering.getMinIops();
maxIops = maxIops != null ? maxIops : offering.getMaxIops();
-VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), offering.getProvisioningType(), sizeInBytes, minIops, maxIops, null);
+VolumeVO vol = new VolumeVO(type, name, zoneId, owner.getDomainId(), owner.getId(), offering.getId(), offering.getProvisioningType(), sizeInBytes, minIops, maxIops, null);
if (vm != null) {
vol.setInstanceId(vm.getId());
+vol.setAttached(new Date());
}
if (deviceId != null) {
@ -2225,17 +2226,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
// display flag matters only for the User vms
-if (VirtualMachine.Type.User.equals(vm.getType())) {
+if (vm != null && VirtualMachine.Type.User.equals(vm.getType())) {
UserVmVO userVm = _userVmDao.findById(vm.getId());
vol.setDisplayVolume(userVm.isDisplayVm());
}
-vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType()));
+vol.setFormat(getSupportedImageFormatForCluster(hypervisorType));
vol.setPoolId(poolId);
vol.setPath(path);
vol.setChainInfo(chainInfo);
vol.setState(Volume.State.Ready);
-vol.setAttached(new Date());
vol = _volsDao.persist(vol);
return toDiskProfile(vol, offering);
}

View File

@ -17,7 +17,14 @@
package org.apache.cloudstack.engine.orchestration;
import java.util.ArrayList;
import java.util.Date;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.offering.DiskOffering;
import com.cloud.storage.Storage;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
@ -32,6 +39,7 @@ import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockedConstruction;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
@ -42,6 +50,7 @@ import com.cloud.exception.StorageAccessException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.Volume.Type;
import com.cloud.user.ResourceLimitService;
import com.cloud.utils.exception.CloudRuntimeException;
@ -54,6 +63,8 @@ public class VolumeOrchestratorTest {
protected VolumeService volumeService;
@Mock
protected VolumeDataFactory volumeDataFactory;
@Mock
protected VolumeDao volumeDao;
@Spy
@InjectMocks
@ -155,4 +166,46 @@ public class VolumeOrchestratorTest {
volumeOrchestrator.grantVolumeAccessToHostIfNeeded(store, 1L,
Mockito.mock(HostVO.class), "");
}
@Test
public void testImportVolume() {
Type volumeType = Type.DATADISK;
String name = "new-volume";
Long sizeInBytes = 1000000L;
Long zoneId = 1L;
Long domainId = 2L;
Long accountId = 3L;
Long diskOfferingId = 4L;
DiskOffering diskOffering = Mockito.mock(DiskOffering.class);
Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM;
Account owner = Mockito.mock(Account.class);
Mockito.when(owner.getDomainId()).thenReturn(domainId);
Mockito.when(owner.getId()).thenReturn(accountId);
Mockito.when(diskOffering.getId()).thenReturn(diskOfferingId);
Long deviceId = 2L;
Long poolId = 3L;
String path = "volume path";
String chainInfo = "chain info";
MockedConstruction<VolumeVO> volumeVOMockedConstructionConstruction = Mockito.mockConstruction(VolumeVO.class, (mock, context) -> {
});
VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
Mockito.when(volumeDao.persist(Mockito.any(VolumeVO.class))).thenReturn(volumeVO);
volumeOrchestrator.importVolume(volumeType, name, diskOffering, sizeInBytes, null, null,
zoneId, hypervisorType, null, null, owner,
deviceId, poolId, path, chainInfo);
VolumeVO volume = volumeVOMockedConstructionConstruction.constructed().get(0);
Mockito.verify(volume, Mockito.never()).setInstanceId(Mockito.anyLong());
Mockito.verify(volume, Mockito.never()).setAttached(Mockito.any(Date.class));
Mockito.verify(volume, Mockito.times(1)).setDeviceId(deviceId);
Mockito.verify(volume, Mockito.never()).setDisplayVolume(Mockito.any(Boolean.class));
Mockito.verify(volume, Mockito.times(1)).setFormat(Storage.ImageFormat.QCOW2);
Mockito.verify(volume, Mockito.times(1)).setPoolId(poolId);
Mockito.verify(volume, Mockito.times(1)).setPath(path);
Mockito.verify(volume, Mockito.times(1)).setChainInfo(chainInfo);
Mockito.verify(volume, Mockito.times(1)).setState(Volume.State.Ready);
}
}

View File

@ -0,0 +1,177 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.GetVolumesOnStorageAnswer;
import com.cloud.agent.api.GetVolumesOnStorageCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage.StoragePoolType;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@ResourceWrapper(handles = GetVolumesOnStorageCommand.class)
public final class LibvirtGetVolumesOnStorageCommandWrapper extends CommandWrapper<GetVolumesOnStorageCommand, Answer, LibvirtComputingResource> {
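// Pool types whose volumes can be inspected directly with qemu-img; for other pool types
// getDiskFileInfo() returns an empty map and the file details are treated as unknown.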
static final List<StoragePoolType> STORAGE_POOL_TYPES_SUPPORTED_BY_QEMU_IMG = Arrays.asList(StoragePoolType.NetworkFilesystem,
StoragePoolType.Filesystem, StoragePoolType.RBD);
@Override
public Answer execute(final GetVolumesOnStorageCommand command, final LibvirtComputingResource libvirtComputingResource) {
final StorageFilerTO pool = command.getPool();
final String volumePath = command.getVolumePath();
final String keyword = command.getKeyword();
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
final KVMStoragePool storagePool = storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid(), true);
if (StringUtils.isNotBlank(volumePath)) {
return addVolumeByVolumePath(command, storagePool, volumePath);
} else {
return addAllVolumes(command, storagePool, keyword);
}
}
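// Returns the details of a single disk identified by its path: format, physical and virtual size,
// encryption format and qemu-img details (backing file, cluster size, encryption, lock state).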
private GetVolumesOnStorageAnswer addVolumeByVolumePath(final GetVolumesOnStorageCommand command, final KVMStoragePool storagePool, String volumePath) {
List<VolumeOnStorageTO> volumes = new ArrayList<>();
KVMPhysicalDisk disk = storagePool.getPhysicalDisk(volumePath);
if (disk != null) {
if (!volumePath.equals(disk.getPath()) && !volumePath.equals(disk.getName())) {
String error = String.format("Volume path mismatch. Expected volume path (%s) is not the same as the actual name (%s) and path (%s)", volumePath, disk.getName(), disk.getPath());
return new GetVolumesOnStorageAnswer(command, false, error);
}
if (!isDiskFormatSupported(disk)) {
return new GetVolumesOnStorageAnswer(command, false, String.format("disk format %s is unsupported", disk.getFormat()));
}
Map<String, String> info = getDiskFileInfo(storagePool, disk, true);
if (info == null) {
return new GetVolumesOnStorageAnswer(command, false, "failed to get information of disk file. The disk might be locked or unsupported");
}
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(Hypervisor.HypervisorType.KVM, disk.getName(), disk.getName(), disk.getPath(),
disk.getFormat().toString(), disk.getSize(), disk.getVirtualSize());
if (disk.getQemuEncryptFormat() != null) {
volumeOnStorageTO.setQemuEncryptFormat(disk.getQemuEncryptFormat().toString());
}
String backingFilePath = info.get(QemuImg.BACKING_FILE);
if (StringUtils.isNotBlank(backingFilePath)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE, backingFilePath);
}
String backingFileFormat = info.get(QemuImg.BACKING_FILE_FORMAT);
if (StringUtils.isNotBlank(backingFileFormat)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT, backingFileFormat);
}
String clusterSize = info.get(QemuImg.CLUSTER_SIZE);
if (StringUtils.isNotBlank(clusterSize)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.CLUSTER_SIZE, clusterSize);
}
String fileFormat = info.get(QemuImg.FILE_FORMAT);
if (StringUtils.isNotBlank(fileFormat)) {
if (!fileFormat.equalsIgnoreCase(disk.getFormat().toString())) {
return new GetVolumesOnStorageAnswer(command, false, String.format("The file format is %s, but expected to be %s", fileFormat, disk.getFormat()));
}
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.FILE_FORMAT, fileFormat);
}
String encrypted = info.get(QemuImg.ENCRYPTED);
if (StringUtils.isNotBlank(encrypted) && encrypted.equalsIgnoreCase("yes")) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.IS_ENCRYPTED, String.valueOf(Boolean.TRUE));
}
Boolean isLocked = isDiskFileLocked(storagePool, disk);
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.IS_LOCKED, String.valueOf(isLocked));
volumes.add(volumeOnStorageTO);
}
return new GetVolumesOnStorageAnswer(command, volumes);
}
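// Lists all disks on the pool (optionally filtered by a keyword in the disk name),
// skipping disks whose format is not QCOW2 or RAW.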
private GetVolumesOnStorageAnswer addAllVolumes(final GetVolumesOnStorageCommand command, final KVMStoragePool storagePool, String keyword) {
List<VolumeOnStorageTO> volumes = new ArrayList<>();
List<KVMPhysicalDisk> disks = storagePool.listPhysicalDisks();
if (StringUtils.isNotBlank(keyword)) {
disks = disks.stream().filter(disk -> disk.getName().contains(keyword)).collect(Collectors.toList());
}
disks.sort(Comparator.comparing(KVMPhysicalDisk::getName));
for (KVMPhysicalDisk disk: disks) {
if (!isDiskFormatSupported(disk)) {
continue;
}
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(Hypervisor.HypervisorType.KVM, disk.getName(), disk.getName(), disk.getPath(),
disk.getFormat().toString(), disk.getSize(), disk.getVirtualSize());
if (disk.getQemuEncryptFormat() != null) {
volumeOnStorageTO.setQemuEncryptFormat(disk.getQemuEncryptFormat().toString());
}
volumes.add(volumeOnStorageTO);
}
return new GetVolumesOnStorageAnswer(command, volumes);
}
private boolean isDiskFormatSupported(KVMPhysicalDisk disk) {
return PhysicalDiskFormat.QCOW2.equals(disk.getFormat()) || PhysicalDiskFormat.RAW.equals(disk.getFormat());
}
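// A disk is treated as locked when 'qemu-img info' without the -U (force share) flag fails,
// which typically means another process holds the image open.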
private boolean isDiskFileLocked(KVMStoragePool pool, KVMPhysicalDisk disk) {
Map<String, String> info = getDiskFileInfo(pool, disk, false);
return info == null;
}
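// Runs 'qemu-img info' against the disk, building an RBD URI for RBD pools. When 'secure' is true
// the -U (force share) flag is added so the image can be inspected even while it is in use.
// Returns an empty map for pool types qemu-img cannot inspect, and null when the command fails.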
private Map<String, String> getDiskFileInfo(KVMStoragePool pool, KVMPhysicalDisk disk, boolean secure) {
if (!STORAGE_POOL_TYPES_SUPPORTED_BY_QEMU_IMG.contains(pool.getType())) {
return new HashMap<>(); // pool type cannot be inspected with qemu-img; treat details as unknown
}
try {
QemuImg qemu = new QemuImg(0);
QemuImgFile qemuFile = new QemuImgFile(disk.getPath(), disk.getFormat());
if (StoragePoolType.RBD.equals(pool.getType())) {
String rbdDestFile = KVMPhysicalDisk.RBDStringBuilder(pool.getSourceHost(),
pool.getSourcePort(),
pool.getAuthUserName(),
pool.getAuthSecret(),
disk.getPath());
qemuFile = new QemuImgFile(rbdDestFile, disk.getFormat());
}
return qemu.info(qemuFile, secure);
} catch (QemuImgException | LibvirtException ex) {
logger.error("Failed to get info of disk file: " + ex.getMessage());
return null;
}
}
}

View File

@ -45,6 +45,7 @@ public class QemuImg {
public static final String FILE_FORMAT = "file_format"; public static final String FILE_FORMAT = "file_format";
public static final String IMAGE = "image"; public static final String IMAGE = "image";
public static final String VIRTUAL_SIZE = "virtual_size"; public static final String VIRTUAL_SIZE = "virtual_size";
public static final String ENCRYPTED = "encrypted";
public static final String ENCRYPT_FORMAT = "encrypt.format"; public static final String ENCRYPT_FORMAT = "encrypt.format";
public static final String ENCRYPT_KEY_SECRET = "encrypt.key-secret"; public static final String ENCRYPT_KEY_SECRET = "encrypt.key-secret";
public static final String TARGET_ZERO_FLAG = "--target-is-zero"; public static final String TARGET_ZERO_FLAG = "--target-is-zero";
@ -554,9 +555,13 @@ public class QemuImg {
* @return A HashMap with string key-value information as returned by 'qemu-img info'. * @return A HashMap with string key-value information as returned by 'qemu-img info'.
*/ */
public Map<String, String> info(final QemuImgFile file) throws QemuImgException { public Map<String, String> info(final QemuImgFile file) throws QemuImgException {
return info(file, true);
}
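// When 'secure' is true and qemu-img is at least version 2.10, the -U (force share) flag is passed
// (roughly 'qemu-img info -U <image>', path shown here only as an illustration), so that information
// can be read from images that are currently opened by another process.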
public Map<String, String> info(final QemuImgFile file, boolean secure) throws QemuImgException {
final Script s = new Script(_qemuImgPath); final Script s = new Script(_qemuImgPath);
s.add("info"); s.add("info");
if (this.version >= QEMU_2_10) { if (this.version >= QEMU_2_10 && secure) {
s.add("-U"); s.add("-U");
} }
s.add(file.getFileName()); s.add(file.getFileName());

View File

@ -0,0 +1,158 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.GetVolumesOnStorageAnswer;
import com.cloud.agent.api.GetVolumesOnStorageCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.storage.Storage;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.utils.qemu.QemuObject;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockedConstruction;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.mockito.Mockito.times;
@RunWith(MockitoJUnitRunner.class)
public class LibvirtGetVolumesOnStorageCommandWrapperTest {
@Mock
LibvirtComputingResource libvirtComputingResource;
@Mock
KVMStoragePoolManager storagePoolMgr;
@Mock
KVMStoragePool storagePool;
@Mock
StorageFilerTO pool;
@Mock
Map<String, String> qemuImgInfo;
private final Storage.StoragePoolType poolType = Storage.StoragePoolType.NetworkFilesystem;
private final String poolUuid = "pool-uuid";
private final String volumePath = "volume-path";
private final String backingFilePath = "backing file path";
private final String backingFileFormat = "QCOW2";
private final String clusterSize = "4096";
private final String fileFormat = "QCOW2";
private final String encrypted = "yes";
private final String diskNamePrefix = "disk-";
@Spy
LibvirtGetVolumesOnStorageCommandWrapper libvirtGetVolumesOnStorageCommandWrapper = new LibvirtGetVolumesOnStorageCommandWrapper();
MockedConstruction<QemuImg> qemuImg;
MockedConstruction<VolumeOnStorageTO> volumeOnStorageTOMock;
@Before
public void setUp() {
Mockito.when(pool.getUuid()).thenReturn(poolUuid);
Mockito.when(pool.getType()).thenReturn(poolType);
Mockito.when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr);
Mockito.when(storagePoolMgr.getStoragePool(poolType, poolUuid, true)).thenReturn(storagePool);
qemuImg = Mockito.mockConstruction(QemuImg.class, (mock, context) -> {
Mockito.when(mock.info(Mockito.any(QemuImgFile.class), Mockito.eq(true))).thenReturn(qemuImgInfo);
});
volumeOnStorageTOMock = Mockito.mockConstruction(VolumeOnStorageTO.class);
}
@After
public void tearDown() {
qemuImg.close();
volumeOnStorageTOMock.close();
}
@Test
public void testLibvirtGetVolumesOnStorageCommandWrapperForAllVolumes() {
GetVolumesOnStorageCommand command = new GetVolumesOnStorageCommand(pool, null, diskNamePrefix);
List<KVMPhysicalDisk> physicalDisks = new ArrayList<>();
int numberDisks = 3;
for (int i = 0; i < numberDisks; i++) {
KVMPhysicalDisk disk = Mockito.mock(KVMPhysicalDisk.class);
Mockito.when(disk.getName()).thenReturn(diskNamePrefix + (numberDisks - i));
Mockito.when(disk.getFormat()).thenReturn(QemuImg.PhysicalDiskFormat.QCOW2);
Mockito.when(disk.getQemuEncryptFormat()).thenReturn(QemuObject.EncryptFormat.LUKS);
physicalDisks.add(disk);
}
Mockito.when(storagePool.listPhysicalDisks()).thenReturn(physicalDisks);
Answer answer = libvirtGetVolumesOnStorageCommandWrapper.execute(command, libvirtComputingResource);
Assert.assertTrue(answer instanceof GetVolumesOnStorageAnswer);
Assert.assertTrue(answer.getResult());
List<VolumeOnStorageTO> volumes = ((GetVolumesOnStorageAnswer) answer).getVolumes();
Assert.assertEquals(numberDisks, volumes.size());
volumeOnStorageTOMock.constructed().forEach(s -> Mockito.verify(s, times(1)).setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS.toString()));
}
@Test
public void testLibvirtGetVolumesOnStorageCommandWrapperForVolume() {
KVMPhysicalDisk disk = Mockito.mock(KVMPhysicalDisk.class);
Mockito.when(disk.getPath()).thenReturn(volumePath);
Mockito.when(disk.getFormat()).thenReturn(QemuImg.PhysicalDiskFormat.QCOW2);
Mockito.when(disk.getQemuEncryptFormat()).thenReturn(QemuObject.EncryptFormat.LUKS);
Mockito.when(storagePool.getPhysicalDisk(volumePath)).thenReturn(disk);
Mockito.when(storagePool.getType()).thenReturn(poolType);
Mockito.when(qemuImgInfo.get(QemuImg.BACKING_FILE)).thenReturn(backingFilePath);
Mockito.when(qemuImgInfo.get(QemuImg.BACKING_FILE_FORMAT)).thenReturn(backingFileFormat);
Mockito.when(qemuImgInfo.get(QemuImg.CLUSTER_SIZE)).thenReturn(clusterSize);
Mockito.when(qemuImgInfo.get(QemuImg.FILE_FORMAT)).thenReturn(fileFormat);
Mockito.when(qemuImgInfo.get(QemuImg.ENCRYPTED)).thenReturn(encrypted);
GetVolumesOnStorageCommand command = new GetVolumesOnStorageCommand(pool, volumePath, null);
Answer answer = libvirtGetVolumesOnStorageCommandWrapper.execute(command, libvirtComputingResource);
Assert.assertTrue(answer instanceof GetVolumesOnStorageAnswer);
Assert.assertTrue(answer.getResult());
List<VolumeOnStorageTO> volumes = ((GetVolumesOnStorageAnswer) answer).getVolumes();
Assert.assertEquals(1, volumes.size());
VolumeOnStorageTO volumeOnStorageTO = volumeOnStorageTOMock.constructed().get(0);
Mockito.verify(volumeOnStorageTO).setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS.toString());
Mockito.verify(volumeOnStorageTO).addDetail(VolumeOnStorageTO.Detail.BACKING_FILE, backingFilePath);
Mockito.verify(volumeOnStorageTO).addDetail(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT, backingFileFormat);
Mockito.verify(volumeOnStorageTO).addDetail(VolumeOnStorageTO.Detail.CLUSTER_SIZE, clusterSize);
Mockito.verify(volumeOnStorageTO).addDetail(VolumeOnStorageTO.Detail.FILE_FORMAT, fileFormat);
Mockito.verify(volumeOnStorageTO).addDetail(VolumeOnStorageTO.Detail.IS_ENCRYPTED, "true");
Mockito.verify(volumeOnStorageTO).addDetail(VolumeOnStorageTO.Detail.IS_LOCKED, "false");
}
}

View File

@ -147,7 +147,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore
String uri = String.format("%s://%s%s", scheme, storageHost, hostPath); String uri = String.format("%s://%s%s", scheme, storageHost, hostPath);
Object localStorage = dsInfos.get("localStorage"); Object localStorage = dsInfos.get("localStorage");
if (localStorage != null) { if (localStorage != null) {
hostPath = hostPath.contains("//") ? hostPath.replaceFirst("/", "") : hostPath; hostPath = hostPath.contains("//") ? hostPath.replaceFirst("/", "") : hostPath;
hostPath = hostPath.replace("+", " "); hostPath = hostPath.replace("+", " ");
} }

View File

@ -0,0 +1,515 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.volume;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.GetVolumesOnStorageAnswer;
import com.cloud.agent.api.GetVolumesOnStorageCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.configuration.Resource;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.event.ActionEventUtils;
import com.cloud.event.EventTypes;
import com.cloud.event.EventVO;
import com.cloud.event.UsageEventUtils;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.offering.DiskOffering;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeApiService;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.ResourceLimitService;
import com.cloud.user.User;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.DiskProfile;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.ResponseGenerator;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.command.admin.volume.ImportVolumeCmd;
import org.apache.cloudstack.api.command.admin.volume.ListVolumesForImportCmd;
import org.apache.cloudstack.api.command.admin.volume.UnmanageVolumeCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.VolumeForImportResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import javax.inject.Inject;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Map;
public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageService {
protected Logger logger = Logger.getLogger(VolumeImportUnmanageManagerImpl.class);
@Inject
private AccountManager accountMgr;
@Inject
private AgentManager agentManager;
@Inject
private HostDao hostDao;
@Inject
private DiskOfferingDao diskOfferingDao;
@Inject
private ResourceLimitService resourceLimitService;
@Inject
private ResponseGenerator responseGenerator;
@Inject
private VolumeDao volumeDao;
@Inject
private PrimaryDataStoreDao primaryDataStoreDao;
@Inject
private StoragePoolHostDao storagePoolHostDao;
@Inject
private ConfigurationManager configMgr;
@Inject
private DataCenterDao dcDao;
@Inject
private VolumeOrchestrationService volumeManager;
@Inject
private VMTemplatePoolDao templatePoolDao;
@Inject
private VolumeApiService volumeApiService;
@Inject
private SnapshotDataStoreDao snapshotDataStoreDao;
static final String DEFAULT_DISK_OFFERING_NAME = "Default Custom Offering for Volume Import";
static final String DEFAULT_DISK_OFFERING_UNIQUE_NAME = "Volume-Import";
static final String DISK_OFFERING_NAME_SUFFIX_LOCAL = " - Local Storage";
static final String DISK_OFFERING_UNIQUE_NAME_SUFFIX_LOCAL = "-Local";
protected void logFailureAndThrowException(String msg) {
logger.error(msg);
throw new CloudRuntimeException(msg);
}
@Override
public List<Class<?>> getCommands() {
final List<Class<?>> cmdList = new ArrayList<>();
cmdList.add(ListVolumesForImportCmd.class);
cmdList.add(ImportVolumeCmd.class);
cmdList.add(UnmanageVolumeCmd.class);
return cmdList;
}
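// Lists volumes found on the storage pool that are not yet known to CloudStack: volumes already
// managed, template base images and snapshot references on primary storage are filtered out.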
@Override
public ListResponse<VolumeForImportResponse> listVolumesForImport(ListVolumesForImportCmd cmd) {
Long poolId = cmd.getStorageId();
String path = cmd.getPath();
String keyword = cmd.getKeyword();
if (StringUtils.isNotBlank(keyword)) {
keyword = keyword.trim();
}
StoragePoolVO pool = checkIfPoolAvailable(poolId);
List<VolumeOnStorageTO> volumes = listVolumesForImportInternal(pool, path, keyword);
List<VolumeForImportResponse> responses = new ArrayList<>();
for (VolumeOnStorageTO volume : volumes) {
if (checkIfVolumeManaged(pool, volume.getPath())
|| checkIfVolumeForTemplate(pool, volume.getPath())
|| checkIfVolumeForSnapshot(pool, volume.getFullPath())) {
continue;
}
responses.add(createVolumeForImportResponse(volume, pool));
}
ListResponse<VolumeForImportResponse> listResponses = new ListResponse<>();
listResponses.setResponses(responses, responses.size());
return listResponses;
}
@Override
public VolumeResponse importVolume(ImportVolumeCmd cmd) {
// 1. verify owner
final Account caller = CallContext.current().getCallingAccount();
if (caller.getType() != Account.Type.ADMIN) {
throw new PermissionDeniedException(String.format("Cannot import volume as the caller account [%s] is not ROOT Admin.", caller.getUuid()));
}
Account owner = accountMgr.finalizeOwner(caller, cmd.getAccountName(), cmd.getDomainId(), cmd.getProjectId());
if (owner == null) {
logFailureAndThrowException("Cannot import volume due to unknown owner");
}
// 2. check if pool exists and not in maintenance
Long poolId = cmd.getStorageId();
StoragePoolVO pool = checkIfPoolAvailable(poolId);
// 3. check if the volume already exists in cloudstack by path
String volumePath = cmd.getPath();
if (StringUtils.isBlank(volumePath)) {
logFailureAndThrowException("Volume path is null or blank: " + volumePath);
}
if (checkIfVolumeManaged(pool, volumePath)){
logFailureAndThrowException("Volume is already managed by CloudStack: " + volumePath);
}
if (checkIfVolumeForTemplate(pool, volumePath)) {
logFailureAndThrowException("Volume is a base image of a template: " + volumePath);
}
// 4. get volume info on storage through host and check
VolumeOnStorageTO volume = getVolumeOnStorageAndCheck(pool, volumePath);
if (checkIfVolumeForSnapshot(pool, volume.getFullPath())) {
logFailureAndThrowException("Volume is a reference of snapshot on primary: " + volume.getFullPath());
}
// 5. check resource limitation
checkResourceLimitForImportVolume(owner, volume);
// 6. get disk offering
DiskOfferingVO diskOffering = getOrCreateDiskOffering(owner, cmd.getDiskOfferingId(), pool.getDataCenterId(), pool.isLocal());
if (diskOffering.isCustomized()) {
volumeApiService.validateCustomDiskOfferingSizeRange(volume.getVirtualSize() / ByteScaleUtils.GiB);
}
if (!volumeApiService.doesTargetStorageSupportDiskOffering(pool, diskOffering.getTags())) {
logFailureAndThrowException(String.format("Disk offering: %s storage tags are not compatible with selected storage pool: %s", diskOffering.getUuid(), pool.getUuid()));
}
// 7. create records
String volumeName = StringUtils.isNotBlank(cmd.getName()) ? cmd.getName().trim() : volumePath;
VolumeVO volumeVO = importVolumeInternal(volume, diskOffering, owner, pool, volumeName);
// 8. Update resource count
updateResourceLimitForVolumeImport(volumeVO);
// 9. Publish event
publicUsageEventForVolumeImportAndUnmanage(volumeVO, true);
return responseGenerator.createVolumeResponse(ResponseObject.ResponseView.Full, volumeVO);
}
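// Fetches the volume details from a host that can reach the pool and rejects volumes that are
// locked, encrypted or backed by another file, since those cannot be imported or unmanaged.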
protected VolumeOnStorageTO getVolumeOnStorageAndCheck(StoragePoolVO pool, String volumePath) {
// send a command to hypervisor to check
List<VolumeOnStorageTO> volumes = listVolumesForImportInternal(pool, volumePath, null);
if (CollectionUtils.isEmpty(volumes)) {
logFailureAndThrowException("Cannot find volume on storage pool: " + volumePath);
}
VolumeOnStorageTO volume = volumes.get(0);
// check if volume is locked, encrypted or has backing file
checkIfVolumeIsLocked(volume);
checkIfVolumeIsEncrypted(volume);
checkIfVolumeHasBackingFile(volume);
return volume;
}
protected List<VolumeOnStorageTO> listVolumesForImportInternal(StoragePoolVO pool, String volumePath, String keyword) {
Pair<HostVO, String> hostAndLocalPath = findHostAndLocalPathForVolumeImport(pool);
HostVO host = hostAndLocalPath.first();
checkIfHostAndPoolSupported(host, pool);
StorageFilerTO storageTO = new StorageFilerTO(pool);
GetVolumesOnStorageCommand command = new GetVolumesOnStorageCommand(storageTO, volumePath, keyword);
Answer answer = agentManager.easySend(host.getId(), command);
if (answer == null || !(answer instanceof GetVolumesOnStorageAnswer)) {
logFailureAndThrowException("Cannot get volumes on storage pool via host " + host.getName());
}
if (!answer.getResult()) {
logFailureAndThrowException("Volume cannot be imported due to " + answer.getDetails());
}
return ((GetVolumesOnStorageAnswer) answer).getVolumes();
}
@Override
public boolean unmanageVolume(long volumeId) {
// 1. check if volume can be unmanaged
VolumeVO volume = checkIfVolumeCanBeUnmanaged(volumeId);
// 2. check if pool available
StoragePoolVO pool = checkIfPoolAvailable(volume.getPoolId());
// 3. get volume info on storage through host and check
getVolumeOnStorageAndCheck(pool, volume.getPath());
// 4. update resource count
updateResourceLimitForVolumeUnmanage(volume);
// 5. publish events
publicUsageEventForVolumeImportAndUnmanage(volume, false);
// 6. update the state/removed of the record
unmanageVolumeFromDatabase(volume);
return true;
}
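// The pool must exist, must not be in maintenance and must be in Up state.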
protected StoragePoolVO checkIfPoolAvailable(Long poolId) {
StoragePoolVO pool = primaryDataStoreDao.findById(poolId);
if (pool == null) {
logFailureAndThrowException(String.format("Storage pool (ID: %s) does not exist", poolId));
}
if (pool.isInMaintenance()) {
logFailureAndThrowException(String.format("Storage pool (name: %s) is in maintenance", pool.getName()));
}
if (!StoragePoolStatus.Up.equals(pool.getStatus())) {
logFailureAndThrowException(String.format("Storage pool (ID: %s) is not Up: %s", pool.getName(), pool.getStatus()));
}
return pool;
}
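// Picks a host that has access to the pool: for host-scoped pools any host attached to the pool,
// for cluster- and zone-scoped pools the first up hypervisor host with a pool-host mapping.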
protected Pair<HostVO, String> findHostAndLocalPathForVolumeImport(StoragePoolVO pool) {
List<HostVO> hosts = new ArrayList<>();
switch (pool.getScope()) {
case HOST:
return findHostAndLocalPathForVolumeImportForHostScope(pool.getId());
case CLUSTER:
hosts = hostDao.findHypervisorHostInCluster(pool.getClusterId());
break;
case ZONE:
hosts = hostDao.listAllHostsUpByZoneAndHypervisor(pool.getDataCenterId(), pool.getHypervisor());
break;
}
for (HostVO host : hosts) {
StoragePoolHostVO storagePoolHostVO = storagePoolHostDao.findByPoolHost(pool.getId(), host.getId());
if (storagePoolHostVO != null) {
return new Pair<>(host, storagePoolHostVO.getLocalPath());
}
}
logFailureAndThrowException("No host found to perform volume import");
return null;
}
private Pair<HostVO, String> findHostAndLocalPathForVolumeImportForHostScope(Long poolId) {
List<StoragePoolHostVO> storagePoolHostVOs = storagePoolHostDao.listByPoolId(poolId);
if (CollectionUtils.isNotEmpty(storagePoolHostVOs)) {
for (StoragePoolHostVO storagePoolHostVO : storagePoolHostVOs) {
HostVO host = hostDao.findById(storagePoolHostVO.getHostId());
if (host != null) {
return new Pair<>(host, storagePoolHostVO.getLocalPath());
}
}
}
logFailureAndThrowException("No host found to perform volume import on pool: " + poolId);
return null;
}
private void checkIfHostAndPoolSupported(HostVO host, StoragePoolVO pool) {
if (!SUPPORTED_HYPERVISORS.contains(host.getHypervisorType())) {
logFailureAndThrowException("Importing and unmanaging volume are not supported for hypervisor: " + host.getHypervisorType());
}
if (Hypervisor.HypervisorType.KVM.equals(host.getHypervisorType()) && !SUPPORTED_STORAGE_POOL_TYPES_FOR_KVM.contains(pool.getPoolType())) {
logFailureAndThrowException(String.format("Importing and unmanaging volume are not supported for pool type %s on hypervisor %s", pool.getPoolType(), host.getHypervisorType()));
}
}
protected VolumeForImportResponse createVolumeForImportResponse(VolumeOnStorageTO volume, StoragePoolVO pool) {
VolumeForImportResponse response = new VolumeForImportResponse();
response.setPath(volume.getPath());
response.setName(volume.getName());
response.setFullPath(volume.getFullPath());
response.setFormat(volume.getFormat());
response.setSize(volume.getSize());
response.setVirtualSize(volume.getVirtualSize());
response.setQemuEncryptFormat(volume.getQemuEncryptFormat());
response.setStoragePoolId(pool.getUuid());
response.setStoragePoolName(pool.getName());
response.setStoragePoolType(String.valueOf(pool.getPoolType()));
response.setDetails(volume.getDetails());
response.setObjectName("volumeforimport");
return response;
}
private boolean checkIfVolumeManaged(StoragePoolVO pool, String volumePath) {
return volumeDao.findByPoolIdAndPath(pool.getId(), volumePath) != null;
}
private boolean checkIfVolumeForTemplate(StoragePoolVO pool, String volumePath) {
return templatePoolDao.findByPoolPath(pool.getId(), volumePath) != null;
}
private boolean checkIfVolumeForSnapshot(StoragePoolVO pool, String fullVolumePath) {
List<String> absPathList = Arrays.asList(fullVolumePath);
return CollectionUtils.isNotEmpty(snapshotDataStoreDao.listByStoreAndInstallPaths(pool.getId(), DataStoreRole.Primary, absPathList));
}
protected void checkIfVolumeIsLocked(VolumeOnStorageTO volume) {
Map<VolumeOnStorageTO.Detail, String> volumeDetails = volume.getDetails();
if (volumeDetails != null && volumeDetails.containsKey(VolumeOnStorageTO.Detail.IS_LOCKED)) {
String isLocked = volumeDetails.get(VolumeOnStorageTO.Detail.IS_LOCKED);
if (Boolean.parseBoolean(isLocked)) {
logFailureAndThrowException("Locked volume cannot be imported or unmanaged.");
}
}
}
protected void checkIfVolumeIsEncrypted(VolumeOnStorageTO volume) {
Map<VolumeOnStorageTO.Detail, String> volumeDetails = volume.getDetails();
if (volumeDetails != null && volumeDetails.containsKey(VolumeOnStorageTO.Detail.IS_ENCRYPTED)) {
String isEncrypted = volumeDetails.get(VolumeOnStorageTO.Detail.IS_ENCRYPTED);
if (Boolean.parseBoolean(isEncrypted)) {
logFailureAndThrowException("Encrypted volume cannot be imported or unmanaged.");
}
}
}
protected void checkIfVolumeHasBackingFile(VolumeOnStorageTO volume) {
Map<VolumeOnStorageTO.Detail, String> volumeDetails = volume.getDetails();
if (volumeDetails != null && volumeDetails.containsKey(VolumeOnStorageTO.Detail.BACKING_FILE)) {
String backingFile = volumeDetails.get(VolumeOnStorageTO.Detail.BACKING_FILE);
if (StringUtils.isNotBlank(backingFile)) {
logFailureAndThrowException("Volume with backing file cannot be imported or unmanaged.");
}
}
}
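// Validates a caller-supplied disk offering (active, matching local/shared storage, no encryption,
// accessible by the owner); without one, falls back to a default offering created for volume import.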
protected DiskOfferingVO getOrCreateDiskOffering(Account owner, Long diskOfferingId, Long zoneId, boolean isLocal) {
if (diskOfferingId != null) {
// check if disk offering exists and active
DiskOfferingVO diskOfferingVO = diskOfferingDao.findById(diskOfferingId);
if (diskOfferingVO == null) {
logFailureAndThrowException(String.format("Disk offering %s does not exist", diskOfferingId));
}
if (!DiskOffering.State.Active.equals(diskOfferingVO.getState())) {
logFailureAndThrowException(String.format("Disk offering with ID %s is not active", diskOfferingId));
}
if (diskOfferingVO.isUseLocalStorage() != isLocal) {
logFailureAndThrowException(String.format("Disk offering with ID %s should use %s storage", diskOfferingId, isLocal ? "local": "shared"));
}
if (diskOfferingVO.getEncrypt()) {
logFailureAndThrowException(String.format("Disk offering with ID %s should not support volume encryption", diskOfferingId));
}
// check if disk offering is accessible by the account/owner
try {
configMgr.checkDiskOfferingAccess(owner, diskOfferingVO, dcDao.findById(zoneId));
return diskOfferingVO;
} catch (PermissionDeniedException ex) {
logFailureAndThrowException(String.format("Disk offering with ID %s is not accessible by owner %s", diskOfferingId, owner));
}
}
return getOrCreateDefaultDiskOfferingIdForVolumeImport(isLocal);
}
private DiskOfferingVO getOrCreateDefaultDiskOfferingIdForVolumeImport(boolean isLocalStorage) {
final StringBuilder diskOfferingNameBuilder = new StringBuilder(DEFAULT_DISK_OFFERING_NAME);
final StringBuilder uniqueNameBuilder = new StringBuilder(DEFAULT_DISK_OFFERING_UNIQUE_NAME);
if (isLocalStorage) {
diskOfferingNameBuilder.append(DISK_OFFERING_NAME_SUFFIX_LOCAL);
uniqueNameBuilder.append(DISK_OFFERING_UNIQUE_NAME_SUFFIX_LOCAL);
}
final String diskOfferingName = diskOfferingNameBuilder.toString();
final String uniqueName = uniqueNameBuilder.toString();
DiskOfferingVO diskOffering = diskOfferingDao.findByUniqueName(uniqueName);
if (diskOffering != null) {
return diskOffering;
}
DiskOfferingVO newDiskOffering = new DiskOfferingVO(diskOfferingName, diskOfferingName,
Storage.ProvisioningType.THIN, 0, null, true, null, null, null);
newDiskOffering.setUseLocalStorage(isLocalStorage);
newDiskOffering.setUniqueName(uniqueName);
newDiskOffering = diskOfferingDao.persistDefaultDiskOffering(newDiskOffering);
return newDiskOffering;
}
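// Creates the volume record through the volume orchestrator as a Ready DATADISK on the pool.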
private VolumeVO importVolumeInternal(VolumeOnStorageTO volume, DiskOfferingVO diskOffering,
Account owner, StoragePoolVO pool, String volumeName) {
DiskProfile diskProfile = volumeManager.importVolume(Volume.Type.DATADISK, volumeName, diskOffering,
volume.getVirtualSize(), null, null, pool.getDataCenterId(), volume.getHypervisorType(), null, null,
owner, null, pool.getId(), volume.getPath(), null);
return volumeDao.findById(diskProfile.getVolumeId());
}
protected void checkResourceLimitForImportVolume(Account owner, VolumeOnStorageTO volume) {
Long volumeSize = volume.getVirtualSize();
try {
resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.volume);
resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.primary_storage, volumeSize);
} catch (ResourceAllocationException e) {
logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. %s", owner.getUuid(), StringUtils.defaultString(e.getMessage())));
}
}
private void updateResourceLimitForVolumeImport(VolumeVO volumeVO) {
resourceLimitService.incrementResourceCount(volumeVO.getAccountId(), Resource.ResourceType.volume);
resourceLimitService.incrementResourceCount(volumeVO.getAccountId(), Resource.ResourceType.primary_storage, volumeVO.getSize());
}
private void publicUsageEventForVolumeImportAndUnmanage(VolumeVO volumeVO, boolean isImport) {
try {
String eventType = isImport ? EventTypes.EVENT_VOLUME_IMPORT: EventTypes.EVENT_VOLUME_UNMANAGE;
String eventDescription = isImport ? "Successfully imported volume " + volumeVO.getUuid(): "Successfully unmanaged volume " + volumeVO.getUuid();
ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, volumeVO.getAccountId(), EventVO.LEVEL_INFO,
eventType, eventDescription, volumeVO.getId(), ApiCommandResourceType.Volume.toString(),0);
UsageEventUtils.publishUsageEvent(eventType, volumeVO.getAccountId(), volumeVO.getDataCenterId(),
volumeVO.getId(), volumeVO.getName(), volumeVO.getDiskOfferingId(), null, volumeVO.getSize(),
Volume.class.getName(), volumeVO.getUuid(), volumeVO.isDisplayVolume());
} catch (Exception e) {
logger.error(String.format("Failed to publish volume ID: %s event or usage records during volume import/unmanage", volumeVO.getUuid()), e);
}
}
private void updateResourceLimitForVolumeUnmanage(VolumeVO volumeVO) {
resourceLimitService.decrementResourceCount(volumeVO.getAccountId(), Resource.ResourceType.volume);
resourceLimitService.decrementResourceCount(volumeVO.getAccountId(), Resource.ResourceType.primary_storage, volumeVO.getSize());
}
private VolumeVO checkIfVolumeCanBeUnmanaged(long volumeId) {
VolumeVO volumeVO = volumeDao.findById(volumeId);
if (volumeVO == null) {
logFailureAndThrowException(String.format("Volume (ID: %s) does not exist", volumeId));
}
if (!Volume.State.Ready.equals(volumeVO.getState())) {
logFailureAndThrowException(String.format("Volume (ID: %s) is not ready", volumeId));
}
if (volumeVO.getEncryptFormat() != null) {
logFailureAndThrowException(String.format("Volume (ID: %s) is encrypted", volumeId));
}
if (volumeVO.getAttached() != null || volumeVO.getInstanceId() != null) {
logFailureAndThrowException(String.format("Volume (ID: %s) is attached to VM (ID: %s)", volumeId, volumeVO.getInstanceId()));
}
return volumeVO;
}
private void unmanageVolumeFromDatabase(VolumeVO volume) {
volume.setState(Volume.State.Destroy);
volume.setRemoved(new Date());
volumeDao.update(volume.getId(), volume);
}
}

View File

@ -853,7 +853,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
} }
StoragePool storagePool = getStoragePool(disk, zone, cluster); StoragePool storagePool = getStoragePool(disk, zone, cluster);
DiskProfile profile = volumeManager.importVolume(type, name, diskOffering, diskSize, DiskProfile profile = volumeManager.importVolume(type, name, diskOffering, diskSize,
minIops, maxIops, vm, template, owner, deviceId, storagePool.getId(), path, chainInfo); minIops, maxIops, vm.getDataCenterId(), vm.getHypervisorType(), vm, template, owner, deviceId, storagePool.getId(), path, chainInfo);
return new Pair<DiskProfile, StoragePool>(profile, storagePool); return new Pair<DiskProfile, StoragePool>(profile, storagePool);
} }

View File

@ -360,4 +360,6 @@
</bean> </bean>
<bean id="vnfTemplateManager" class="org.apache.cloudstack.storage.template.VnfTemplateManagerImpl" /> <bean id="vnfTemplateManager" class="org.apache.cloudstack.storage.template.VnfTemplateManagerImpl" />
<bean id="volumeImportUnmanageManager" class="org.apache.cloudstack.storage.volume.VolumeImportUnmanageManagerImpl" />
</beans> </beans>

View File

@ -0,0 +1,622 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.volume;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.GetVolumesOnStorageAnswer;
import com.cloud.agent.api.GetVolumesOnStorageCommand;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.configuration.Resource;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.event.ActionEventUtils;
import com.cloud.event.UsageEventUtils;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.offering.DiskOffering;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeApiService;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.AccountVO;
import com.cloud.user.ResourceLimitService;
import com.cloud.user.User;
import com.cloud.user.UserVO;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.DiskProfile;
import org.apache.cloudstack.api.ResponseGenerator;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.command.admin.volume.ImportVolumeCmd;
import org.apache.cloudstack.api.command.admin.volume.ListVolumesForImportCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.VolumeForImportResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockedConstruction;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import static org.apache.cloudstack.storage.volume.VolumeImportUnmanageManagerImpl.DEFAULT_DISK_OFFERING_UNIQUE_NAME;
import static org.apache.cloudstack.storage.volume.VolumeImportUnmanageManagerImpl.DISK_OFFERING_UNIQUE_NAME_SUFFIX_LOCAL;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.isNull;
import static org.mockito.ArgumentMatchers.nullable;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class VolumeImportUnmanageManagerImplTest {
@Spy
@InjectMocks
VolumeImportUnmanageManagerImpl volumeImportUnmanageManager;
@Mock
private AccountManager accountMgr;
@Mock
private AgentManager agentManager;
@Mock
private HostDao hostDao;
@Mock
private DiskOfferingDao diskOfferingDao;
@Mock
private ResourceLimitService resourceLimitService;
@Mock
private ResponseGenerator responseGenerator;
@Mock
private VolumeDao volumeDao;
@Mock
private PrimaryDataStoreDao primaryDataStoreDao;
@Mock
private StoragePoolHostDao storagePoolHostDao;
@Mock
private ConfigurationManager configMgr;
@Mock
private DataCenterDao dcDao;
@Mock
private VolumeOrchestrationService volumeManager;
@Mock
private VMTemplatePoolDao templatePoolDao;
@Mock
private VolumeApiService volumeApiService;
@Mock
private SnapshotDataStoreDao snapshotDataStoreDao;
@Mock
StoragePoolVO storagePoolVO;
@Mock
VolumeVO volumeVO;
@Mock
DiskProfile diskProfile;
@Mock
HostVO hostVO;
@Mock
StoragePoolHostVO storagePoolHostVO;
@Mock
DiskOfferingVO diskOfferingVO;
@Mock
DataCenterVO dataCenterVO;
final static long accountId = 10L;
final static long zoneId = 11L;
final static long clusterId = 11L;
final static long hostId = 13L;
final static long poolId = 100L;
final static boolean isLocal = true;
final static long volumeId = 101L;
final static String volumeName = "import volume";
final static long diskOfferingId = 120L;
final static String localPath = "/mnt/localPath";
private static String path = "path";
private static String name = "name";
private static String fullPath = "fullPath";
private static String format = "qcow2";
private static long size = 100000L;
private static long virtualSize = 20000000L;
private static String encryptFormat = "LUKS";
private static Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM;
private static String BACKING_FILE = "backing file";
private static String BACKING_FILE_FORMAT = "qcow2";
private static String storagePoolUuid = "pool-uuid";
private static String storagePoolName = "pool-name";
private static Storage.StoragePoolType storagePoolType = Storage.StoragePoolType.NetworkFilesystem;
AccountVO account;
@Before
public void setUp() {
CallContext.unregister();
account = new AccountVO("admin", 1L, "", Account.Type.ADMIN, "uuid");
account.setId(accountId);
UserVO user = new UserVO(1, "admin", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
CallContext.register(user, account);
when(accountMgr.finalizeOwner(any(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
when(primaryDataStoreDao.findById(poolId)).thenReturn(storagePoolVO);
when(storagePoolVO.getId()).thenReturn(poolId);
when(storagePoolVO.getDataCenterId()).thenReturn(zoneId);
when(storagePoolVO.isLocal()).thenReturn(isLocal);
when(storagePoolVO.getHypervisor()).thenReturn(hypervisorType);
when(storagePoolVO.getUuid()).thenReturn(storagePoolUuid);
when(storagePoolVO.getName()).thenReturn(storagePoolName);
when(storagePoolVO.getPoolType()).thenReturn(storagePoolType);
when(storagePoolVO.getStatus()).thenReturn(StoragePoolStatus.Up);
when(volumeDao.findById(volumeId)).thenReturn(volumeVO);
when(volumeVO.getId()).thenReturn(volumeId);
when(volumeVO.getAccountId()).thenReturn(accountId);
when(volumeVO.getSize()).thenReturn(virtualSize);
when(volumeVO.getDataCenterId()).thenReturn(zoneId);
when(volumeVO.getName()).thenReturn(volumeName);
when(hostVO.getHypervisorType()).thenReturn(hypervisorType);
when(hostVO.getId()).thenReturn(hostId);
when(hostDao.findById(hostId)).thenReturn(hostVO);
when(storagePoolHostVO.getLocalPath()).thenReturn(localPath);
when(storagePoolHostDao.findByPoolHost(poolId, hostId)).thenReturn(storagePoolHostVO);
when(storagePoolHostVO.getHostId()).thenReturn(hostId);
when(dcDao.findById(zoneId)).thenReturn(dataCenterVO);
}
@Test
public void testListVolumesForImport() {
ListVolumesForImportCmd cmd = mock(ListVolumesForImportCmd.class);
when(cmd.getPath()).thenReturn(path);
when(cmd.getStorageId()).thenReturn(poolId);
when(volumeDao.findByPoolIdAndPath(poolId, path)).thenReturn(null);
when(templatePoolDao.findByPoolPath(poolId, path)).thenReturn(null);
when(snapshotDataStoreDao.listByStoreAndInstallPaths(eq(poolId), eq(DataStoreRole.Primary), any())).thenReturn(null);
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(hypervisorType, path, name, fullPath,
format, size, virtualSize);
volumeOnStorageTO.setQemuEncryptFormat(encryptFormat);
List<VolumeOnStorageTO> volumesOnStorageTO = new ArrayList<>();
volumesOnStorageTO.add(volumeOnStorageTO);
doReturn(volumesOnStorageTO).when(volumeImportUnmanageManager).listVolumesForImportInternal(storagePoolVO, path, null);
ListResponse<VolumeForImportResponse> listResponses = volumeImportUnmanageManager.listVolumesForImport(cmd);
Assert.assertEquals(1, listResponses.getResponses().size());
VolumeForImportResponse response = listResponses.getResponses().get(0);
Assert.assertEquals(path, response.getPath());
Assert.assertEquals(name, response.getName());
Assert.assertEquals(fullPath, response.getFullPath());
Assert.assertEquals(format, response.getFormat());
Assert.assertEquals(size, response.getSize());
Assert.assertEquals(virtualSize, response.getVirtualSize());
Assert.assertEquals(encryptFormat, response.getQemuEncryptFormat());
Assert.assertEquals(storagePoolType.name(), response.getStoragePoolType());
Assert.assertEquals(storagePoolName, response.getStoragePoolName());
Assert.assertEquals(storagePoolUuid, response.getStoragePoolId());
}
@Test
public void testImportVolumeAllGood() throws ResourceAllocationException {
ImportVolumeCmd cmd = mock(ImportVolumeCmd.class);
when(cmd.getPath()).thenReturn(path);
when(cmd.getStorageId()).thenReturn(poolId);
when(cmd.getDiskOfferingId()).thenReturn(diskOfferingId);
when(volumeDao.findByPoolIdAndPath(poolId, path)).thenReturn(null);
when(templatePoolDao.findByPoolPath(poolId, path)).thenReturn(null);
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(hypervisorType, path, name, fullPath,
format, size, virtualSize);
volumeOnStorageTO.setQemuEncryptFormat(encryptFormat);
List<VolumeOnStorageTO> volumesOnStorageTO = new ArrayList<>();
volumesOnStorageTO.add(volumeOnStorageTO);
doReturn(volumesOnStorageTO).when(volumeImportUnmanageManager).listVolumesForImportInternal(storagePoolVO, path, null);
doNothing().when(volumeImportUnmanageManager).checkIfVolumeIsLocked(volumeOnStorageTO);
doNothing().when(volumeImportUnmanageManager).checkIfVolumeIsEncrypted(volumeOnStorageTO);
doNothing().when(volumeImportUnmanageManager).checkIfVolumeHasBackingFile(volumeOnStorageTO);
doNothing().when(resourceLimitService).checkResourceLimit(account, Resource.ResourceType.volume);
doNothing().when(resourceLimitService).checkResourceLimit(account, Resource.ResourceType.primary_storage, virtualSize);
DiskOfferingVO diskOffering = mock(DiskOfferingVO.class);
when(diskOffering.isCustomized()).thenReturn(true);
doReturn(diskOffering).when(volumeImportUnmanageManager).getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal);
doNothing().when(volumeApiService).validateCustomDiskOfferingSizeRange(anyLong());
doReturn(true).when(volumeApiService).doesTargetStorageSupportDiskOffering(any(), isNull());
doReturn(diskProfile).when(volumeManager).importVolume(any(), anyString(), any(), eq(virtualSize), isNull(), isNull(), anyLong(),
any(), isNull(), isNull(), any(), isNull(), anyLong(), anyString(), isNull());
when(diskProfile.getVolumeId()).thenReturn(volumeId);
when(volumeDao.findById(volumeId)).thenReturn(volumeVO);
doNothing().when(resourceLimitService).incrementResourceCount(accountId, Resource.ResourceType.volume);
doNothing().when(resourceLimitService).incrementResourceCount(accountId, Resource.ResourceType.primary_storage, virtualSize);
VolumeResponse response = mock(VolumeResponse.class);
doReturn(response).when(responseGenerator).createVolumeResponse(ResponseObject.ResponseView.Full, volumeVO);
try (MockedStatic<UsageEventUtils> ignored = Mockito.mockStatic(UsageEventUtils.class);
MockedStatic<ActionEventUtils> ignoredtoo = Mockito.mockStatic(ActionEventUtils.class)) {
VolumeResponse result = volumeImportUnmanageManager.importVolume(cmd);
Assert.assertEquals(response, result);
}
}
@Test
public void testListVolumesForImportInternal() {
Pair<HostVO, String> hostAndLocalPath = mock(Pair.class);
doReturn(hostAndLocalPath).when(volumeImportUnmanageManager).findHostAndLocalPathForVolumeImport(storagePoolVO);
when(hostAndLocalPath.first()).thenReturn(hostVO);
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(hypervisorType, path, name, fullPath,
format, size, virtualSize);
volumeOnStorageTO.setQemuEncryptFormat(encryptFormat);
List<VolumeOnStorageTO> volumesOnStorageTO = new ArrayList<>();
volumesOnStorageTO.add(volumeOnStorageTO);
GetVolumesOnStorageAnswer answer = mock(GetVolumesOnStorageAnswer.class);
when(answer.getResult()).thenReturn(true);
when(answer.getVolumes()).thenReturn(volumesOnStorageTO);
doReturn(answer).when(agentManager).easySend(eq(hostId), any(GetVolumesOnStorageCommand.class));
List<VolumeOnStorageTO> result = volumeImportUnmanageManager.listVolumesForImportInternal(storagePoolVO, path, null);
Assert.assertEquals(volumesOnStorageTO, result);
}
@Test
public void testCheckIfVolumeIsLocked() {
try {
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(hypervisorType, path, name, fullPath,
format, size, virtualSize);
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.IS_LOCKED, "true");
volumeImportUnmanageManager.checkIfVolumeIsLocked(volumeOnStorageTO);
Assert.fail("It should fail as the volume is locked");
} catch (CloudRuntimeException ex) {
Assert.assertEquals("Locked volume cannot be imported or unmanaged.", ex.getMessage());
verify(volumeImportUnmanageManager).logFailureAndThrowException("Locked volume cannot be imported or unmanaged.");
}
}
@Test
public void testCheckIfVolumeIsEncrypted() {
try {
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(hypervisorType, path, name, fullPath,
format, size, virtualSize);
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.IS_ENCRYPTED, "true");
volumeImportUnmanageManager.checkIfVolumeIsEncrypted(volumeOnStorageTO);
Assert.fail("It should fail as the volume is encrypted");
} catch (CloudRuntimeException ex) {
Assert.assertEquals("Encrypted volume cannot be imported or unmanaged.", ex.getMessage());
verify(volumeImportUnmanageManager).logFailureAndThrowException("Encrypted volume cannot be imported or unmanaged.");
}
}
@Test
public void testCheckIfVolumeHasBackingFile() {
try {
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(hypervisorType, path, name, fullPath,
format, size, virtualSize);
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE, BACKING_FILE);
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT, BACKING_FILE_FORMAT);
volumeImportUnmanageManager.checkIfVolumeHasBackingFile(volumeOnStorageTO);
Assert.fail("It should fail as the volume has backing file");
} catch (CloudRuntimeException ex) {
Assert.assertEquals("Volume with backing file cannot be imported or unmanaged.", ex.getMessage());
verify(volumeImportUnmanageManager).logFailureAndThrowException("Volume with backing file cannot be imported or unmanaged.");
}
}
@Test
public void testUnmanageVolume() {
when(volumeVO.getState()).thenReturn(Volume.State.Ready);
when(volumeVO.getPoolId()).thenReturn(poolId);
when(volumeVO.getInstanceId()).thenReturn(null);
when(volumeVO.getPath()).thenReturn(path);
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(hypervisorType, path, name, fullPath,
format, size, virtualSize);
doReturn(volumeOnStorageTO).when(volumeImportUnmanageManager).getVolumeOnStorageAndCheck(storagePoolVO, path);
doNothing().when(resourceLimitService).decrementResourceCount(accountId, Resource.ResourceType.volume);
doNothing().when(resourceLimitService).decrementResourceCount(accountId, Resource.ResourceType.primary_storage, virtualSize);
try (MockedStatic<UsageEventUtils> ignored = Mockito.mockStatic(UsageEventUtils.class);
MockedStatic<ActionEventUtils> ignoredtoo = Mockito.mockStatic(ActionEventUtils.class)) {
volumeImportUnmanageManager.unmanageVolume(volumeId);
}
verify(resourceLimitService).decrementResourceCount(volumeVO.getAccountId(), Resource.ResourceType.volume);
verify(resourceLimitService).decrementResourceCount(volumeVO.getAccountId(), Resource.ResourceType.primary_storage, virtualSize);
verify(volumeDao).update(eq(volumeId), any());
}
@Test
public void testUnmanageVolumeNotExist() {
try {
when(volumeDao.findById(volumeId)).thenReturn(null);
volumeImportUnmanageManager.unmanageVolume(volumeId);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) does not exist", volumeId));
}
}
@Test
public void testUnmanageVolumeNotReady() {
try {
when(volumeVO.getState()).thenReturn(Volume.State.Allocated);
volumeImportUnmanageManager.unmanageVolume(volumeId);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is not ready", volumeId));
}
}
@Test
public void testUnmanageVolumeEncrypted() {
try {
when(volumeVO.getState()).thenReturn(Volume.State.Ready);
when(volumeVO.getEncryptFormat()).thenReturn(encryptFormat);
volumeImportUnmanageManager.unmanageVolume(volumeId);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is encrypted", volumeId));
}
}
@Test
public void testUnmanageVolumeAttached() {
try {
when(volumeVO.getState()).thenReturn(Volume.State.Ready);
when(volumeVO.getAttached()).thenReturn(new Date());
volumeImportUnmanageManager.unmanageVolume(volumeId);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is attached to VM (ID: %s)", volumeId, volumeVO.getInstanceId()));
}
}
@Test
public void testCheckIfPoolAvailableNotExist() {
try {
when(primaryDataStoreDao.findById(poolId)).thenReturn(null);
volumeImportUnmanageManager.checkIfPoolAvailable(poolId);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool (ID: %s) does not exist", poolId));
}
}
@Test
public void testCheckIfPoolAvailableInMaintenance() {
try {
when(primaryDataStoreDao.findById(poolId)).thenReturn(storagePoolVO);
when(storagePoolVO.isInMaintenance()).thenReturn(true);
volumeImportUnmanageManager.checkIfPoolAvailable(poolId);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool (name: %s) is in maintenance", storagePoolName));
}
}
@Test
public void testCheckIfPoolAvailableDisabled() {
try {
when(primaryDataStoreDao.findById(poolId)).thenReturn(storagePoolVO);
when(storagePoolVO.isInMaintenance()).thenReturn(false);
when(storagePoolVO.getStatus()).thenReturn(StoragePoolStatus.Disabled);
volumeImportUnmanageManager.checkIfPoolAvailable(poolId);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool (ID: %s) is not Up: %s", storagePoolName, StoragePoolStatus.Disabled));
}
}
@Test
public void testFindHostAndLocalPathForVolumeImportZoneScope() {
when(storagePoolVO.getScope()).thenReturn(ScopeType.ZONE);
List<HostVO> hosts = new ArrayList<>();
hosts.add(hostVO);
when(hostDao.listAllHostsUpByZoneAndHypervisor(zoneId, hypervisorType)).thenReturn(hosts);
Pair<HostVO, String> result = volumeImportUnmanageManager.findHostAndLocalPathForVolumeImport(storagePoolVO);
Assert.assertNotNull(result);
Assert.assertEquals(hostVO, result.first());
Assert.assertEquals(localPath, result.second());
}
@Test
public void testFindHostAndLocalPathForVolumeImportClusterScope() {
when(storagePoolVO.getScope()).thenReturn(ScopeType.CLUSTER);
when(storagePoolVO.getClusterId()).thenReturn(clusterId);
List<HostVO> hosts = new ArrayList<>();
hosts.add(hostVO);
when(hostDao.findHypervisorHostInCluster(clusterId)).thenReturn(hosts);
Pair<HostVO, String> result = volumeImportUnmanageManager.findHostAndLocalPathForVolumeImport(storagePoolVO);
Assert.assertNotNull(result);
Assert.assertEquals(hostVO, result.first());
Assert.assertEquals(localPath, result.second());
}
@Test
public void testFindHostAndLocalPathForVolumeImportLocalHost() {
when(storagePoolVO.getScope()).thenReturn(ScopeType.HOST);
List<StoragePoolHostVO> storagePoolHostVOs = new ArrayList<>();
storagePoolHostVOs.add(storagePoolHostVO);
when(storagePoolHostDao.listByPoolId(poolId)).thenReturn(storagePoolHostVOs);
Pair<HostVO, String> result = volumeImportUnmanageManager.findHostAndLocalPathForVolumeImport(storagePoolVO);
Assert.assertNotNull(result);
Assert.assertEquals(hostVO, result.first());
Assert.assertEquals(localPath, result.second());
}
@Test
public void testGetOrCreateDiskOfferingAllGood() {
when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOfferingVO);
when(diskOfferingVO.getState()).thenReturn(DiskOffering.State.Active);
when(diskOfferingVO.isUseLocalStorage()).thenReturn(isLocal);
when(diskOfferingVO.getEncrypt()).thenReturn(false);
doNothing().when(configMgr).checkDiskOfferingAccess(account, diskOfferingVO, dataCenterVO);
DiskOfferingVO result = volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal);
Assert.assertEquals(diskOfferingVO, result);
}
@Test
public void testGetOrCreateDiskOfferingNotExist() {
try {
when(diskOfferingDao.findById(diskOfferingId)).thenReturn(null);
volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s does not exist", diskOfferingId));
}
}
@Test
public void testGetOrCreateDiskOfferingNotActive() {
try {
when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOfferingVO);
when(diskOfferingVO.getState()).thenReturn(DiskOffering.State.Inactive);
volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s is not active", diskOfferingId));
}
}
@Test
public void testGetOrCreateDiskOfferingNotLocal() {
try {
when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOfferingVO);
when(diskOfferingVO.getState()).thenReturn(DiskOffering.State.Active);
when(diskOfferingVO.isUseLocalStorage()).thenReturn(!isLocal);
volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s should use %s storage", diskOfferingId, isLocal ? "local" : "shared"));
}
}
@Test
public void testGetOrCreateDiskOfferingForVolumeEncryption() {
try {
when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOfferingVO);
when(diskOfferingVO.getState()).thenReturn(DiskOffering.State.Active);
when(diskOfferingVO.isUseLocalStorage()).thenReturn(isLocal);
when(diskOfferingVO.getEncrypt()).thenReturn(true);
volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s should not support volume encryption", diskOfferingId));
}
}
@Test
public void testGetOrCreateDiskOfferingNoPermission() {
try {
when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOfferingVO);
when(diskOfferingVO.getState()).thenReturn(DiskOffering.State.Active);
when(diskOfferingVO.isUseLocalStorage()).thenReturn(isLocal);
doThrow(PermissionDeniedException.class).when(configMgr).checkDiskOfferingAccess(account, diskOfferingVO, dataCenterVO);
volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal);
Assert.fail("it should fail");
} catch (CloudRuntimeException ex) {
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s is not accessible by owner %s", diskOfferingId, account));
}
}
@Test
public void testGetOrCreateDefaultDiskOfferingIdForVolumeImportExist() {
String uniqueName = DEFAULT_DISK_OFFERING_UNIQUE_NAME + (isLocal ? DISK_OFFERING_UNIQUE_NAME_SUFFIX_LOCAL : "");
when(diskOfferingDao.findByUniqueName(uniqueName)).thenReturn(diskOfferingVO);
DiskOfferingVO result = volumeImportUnmanageManager.getOrCreateDiskOffering(account, null, zoneId, isLocal);
Assert.assertEquals(diskOfferingVO, result);
}
@Test
public void testGetOrCreateDefaultDiskOfferingIdForVolumeImportNotExist() {
String uniqueName = DEFAULT_DISK_OFFERING_UNIQUE_NAME + (isLocal ? DISK_OFFERING_UNIQUE_NAME_SUFFIX_LOCAL : "");
when(diskOfferingDao.findByUniqueName(uniqueName)).thenReturn(null);
when(diskOfferingDao.persistDefaultDiskOffering(any())).thenReturn(diskOfferingVO);
try (
MockedConstruction<DiskOfferingVO> diskOfferingVOMockedConstruction = Mockito.mockConstruction(DiskOfferingVO.class);
) {
DiskOfferingVO result = volumeImportUnmanageManager.getOrCreateDiskOffering(account, null, zoneId, isLocal);
Assert.assertEquals(diskOfferingVO, result);
DiskOfferingVO diskOfferingVOMock = diskOfferingVOMockedConstruction.constructed().get(0);
verify(diskOfferingVOMock).setUseLocalStorage(isLocal);
verify(diskOfferingVOMock).setUniqueName(uniqueName);
}
}
@Test
public void testLogFailureAndThrowException() {
String message = "error message";
try {
volumeImportUnmanageManager.logFailureAndThrowException(message);
} catch (CloudRuntimeException ex) {
Assert.assertEquals(message, ex.getMessage());
}
}
}
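For readers following the tests above, a rough sketch of the unmanage path they exercise is shown below. This is a hedged reconstruction from the mocks and verifications only, not the committed implementation: the field names (volumeDao, primaryDataStoreDao, resourceLimitService) mirror the mocks in the test class, while the return type, getter names and the exact event publishing are assumptions.

    // Hedged sketch, inferred from VolumeImportUnmanageManagerImplTest; not the committed code.
    // Assumed to live in VolumeImportUnmanageManagerImpl; getVolumeOnStorageAndCheck() wraps the
    // locked/encrypted/backing-file checks exercised by the first three tests in this file.
    public void unmanageVolume(long volumeId) {
        VolumeVO volume = volumeDao.findById(volumeId);
        if (volume == null) {
            logFailureAndThrowException(String.format("Volume (ID: %s) does not exist", volumeId));
        }
        if (!Volume.State.Ready.equals(volume.getState())) {
            logFailureAndThrowException(String.format("Volume (ID: %s) is not ready", volumeId));
        }
        if (volume.getEncryptFormat() != null) {
            logFailureAndThrowException(String.format("Volume (ID: %s) is encrypted", volumeId));
        }
        if (volume.getAttached() != null) {
            logFailureAndThrowException(String.format("Volume (ID: %s) is attached to VM (ID: %s)", volumeId, volume.getInstanceId()));
        }
        // Validate the backing pool, then the volume as it sits on storage.
        StoragePoolVO pool = primaryDataStoreDao.findById(volume.getPoolId());
        checkIfPoolAvailable(volume.getPoolId());
        VolumeOnStorageTO volumeOnStorage = getVolumeOnStorageAndCheck(pool, volume.getPath());
        // Release the owner's resource counts, as verified by testUnmanageVolume().
        resourceLimitService.decrementResourceCount(volume.getAccountId(), Resource.ResourceType.volume);
        resourceLimitService.decrementResourceCount(volume.getAccountId(), Resource.ResourceType.primary_storage, volumeOnStorage.getVirtualSize());
        // A usage event and an action event are presumably published here (the test mocks
        // UsageEventUtils and ActionEventUtils statically), then the volume row is updated.
        volumeDao.update(volumeId, volume);
    }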

View File

@ -0,0 +1,167 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for importVolume and unmanageVolume APIs
"""
# Import Local Modules
from marvin.cloudstackAPI import unmanageVolume, listVolumesForImport, importVolume
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.codes import FAILED
from marvin.lib.base import (Account,
Domain,
Volume,
ServiceOffering,
DiskOffering,
VirtualMachine)
from marvin.lib.common import (get_domain, get_zone, get_suitable_test_template)
# Import System modules
from nose.plugins.attrib import attr
_multiprocess_shared_ = True
class TestImportAndUnmanageVolumes(cloudstackTestCase):
@classmethod
def setUpClass(cls):
testClient = super(TestImportAndUnmanageVolumes, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.services = testClient.getParsedTestDataConfig()
cls.hypervisor = testClient.getHypervisorInfo()
if cls.hypervisor.lower() != "kvm":
raise unittest.SkipTest("This is only available for KVM")
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient)
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["tiny"]
)
cls._cleanup.append(cls.service_offering)
template = get_suitable_test_template(
cls.apiclient,
cls.zone.id,
cls.services["ostype"],
cls.hypervisor
)
if template == FAILED:
assert False, "get_suitable_test_template() failed to return template"
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["mode"] = cls.zone.networktype
cls.disk_offering = DiskOffering.create(cls.apiclient,
cls.services["disk_offering"])
cls._cleanup.append(cls.disk_offering)
cls.test_domain = Domain.create(
cls.apiclient,
cls.services["domain"])
cls._cleanup.append(cls.test_domain)
cls.test_account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.test_domain.id)
cls._cleanup.append(cls.test_account)
# Create VM
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["virtual_machine"],
templateid=template.id,
accountid=cls.test_account.name,
domainid=cls.test_account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.services["mode"]
)
cls._cleanup.append(cls.virtual_machine)
cls.virtual_machine.stop(cls.apiclient, forced=True)
@classmethod
def tearDownClass(cls):
super(TestImportAndUnmanageVolumes, cls).tearDownClass()
@attr(tags=['advanced', 'basic', 'sg'], required_hardware=False)
def test_01_detach_unmanage_import_volume(self):
"""Test attach/detach/unmanage/import volume
"""
# Create DATA volume
volume = Volume.create(
self.apiclient,
self.testdata["volume"],
zoneid=self.zone.id,
account=self.test_account.name,
domainid=self.test_account.domainid,
diskofferingid=self.disk_offering.id
)
# Attach and Detach volume
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with Exception: %s" % e)
self.virtual_machine.detach_volume(self.apiclient, volume)
# List volume by id
volumes = Volume.list(self.apiclient,
id=volume.id)
self.assertTrue(isinstance(volumes, list),
"listVolumes response should return a valid list"
)
self.assertTrue(len(volumes) > 0,
"listVolumes response should return a non-empty list"
)
volume = volumes[0]
# Unmanage volume
cmd = unmanageVolume.unmanageVolumeCmd()
cmd.id = volume.id
self.apiclient.unmanageVolume(cmd)
# List volumes available for import
cmd = listVolumesForImport.listVolumesForImportCmd()
cmd.storageid = volume.storageid
volumesForImport = self.apiclient.listVolumesForImport(cmd)
self.assertTrue(isinstance(volumesForImport, list),
"Check listVolumesForImport response returns a valid list"
)
# Import volume
cmd = importVolume.importVolumeCmd()
cmd.storageid = volume.storageid
cmd.path = volume.path
self.apiclient.importVolume(cmd)
# List volume by name
volumes = Volume.list(self.apiclient,
storageid=volume.storageid,
name=volume.path)
self.assertTrue(isinstance(volumes, list),
"listVolumes response should return a valid list"
)
self.assertTrue(len(volumes) > 0,
"listVolumes response should return a non-empty list"
)

View File

@ -142,6 +142,7 @@
"label.action.image.store.read.only": "Make image store read-only", "label.action.image.store.read.only": "Make image store read-only",
"label.action.image.store.read.write": "Make image store read-write", "label.action.image.store.read.write": "Make image store read-write",
"label.action.import.export.instances": "Import-Export Instances", "label.action.import.export.instances": "Import-Export Instances",
"label.action.import.unmanage.volumes": "Import Data Volumes",
"label.action.ingest.instances": "Ingest instances", "label.action.ingest.instances": "Ingest instances",
"label.action.iso.permission": "Update ISO permissions", "label.action.iso.permission": "Update ISO permissions",
"label.action.iso.share": "Update ISO sharing", "label.action.iso.share": "Update ISO sharing",
@ -194,6 +195,8 @@
"label.action.unmanage.instance": "Unmanage Instance", "label.action.unmanage.instance": "Unmanage Instance",
"label.action.unmanage.instances": "Unmanage Instances", "label.action.unmanage.instances": "Unmanage Instances",
"label.action.unmanage.virtualmachine": "Unmanage Instance", "label.action.unmanage.virtualmachine": "Unmanage Instance",
"label.action.unmanage.volume": "Unmanage Volume",
"label.action.unmanage.volumes": "Unmanage Volumes",
"label.action.update.offering.access": "Update offering access", "label.action.update.offering.access": "Update offering access",
"label.action.update.resource.count": "Update resource count", "label.action.update.resource.count": "Update resource count",
"label.action.value": "Action/Value", "label.action.value": "Action/Value",
@ -683,6 +686,7 @@
"label.desc.import.ext.kvm.wizard": "Import Instance from remote KVM host", "label.desc.import.ext.kvm.wizard": "Import Instance from remote KVM host",
"label.desc.import.local.kvm.wizard": "Import QCOW2 image from Local Storage", "label.desc.import.local.kvm.wizard": "Import QCOW2 image from Local Storage",
"label.desc.import.shared.kvm.wizard": "Import QCOW2 image from Shared Storage", "label.desc.import.shared.kvm.wizard": "Import QCOW2 image from Shared Storage",
"label.desc.import.unmanage.volume": "Import and unmanage volume on Storage Pools",
"label.desc.ingesttinstancewizard": "Ingest instances from an external KVM host", "label.desc.ingesttinstancewizard": "Ingest instances from an external KVM host",
"label.desc.importmigratefromvmwarewizard": "Import instances from VMware into a KVM cluster", "label.desc.importmigratefromvmwarewizard": "Import instances from VMware into a KVM cluster",
"label.desc.usage.stats": "Usage Server Statistics", "label.desc.usage.stats": "Usage Server Statistics",
@ -892,6 +896,7 @@
"label.featured": "Featured", "label.featured": "Featured",
"label.fetch.instances": "Fetch Instances", "label.fetch.instances": "Fetch Instances",
"label.fetch.latest": "Fetch latest", "label.fetch.latest": "Fetch latest",
"label.filename": "File Name",
"label.files": "Alternate files to retrieve", "label.files": "Alternate files to retrieve",
"label.filter": "Filter", "label.filter": "Filter",
"label.filter.annotations.all": "All comments", "label.filter.annotations.all": "All comments",
@ -1024,6 +1029,7 @@
"label.import.instance": "Import Instance", "label.import.instance": "Import Instance",
"label.import.offering": "Import offering", "label.import.offering": "Import offering",
"label.import.role": "Import role", "label.import.role": "Import role",
"label.import.volume": "Import Volume",
"label.inactive": "Inactive", "label.inactive": "Inactive",
"label.in.progress": "in progress", "label.in.progress": "in progress",
"label.in.progress.for": "in progress for", "label.in.progress.for": "in progress for",
@ -1260,6 +1266,7 @@
"label.manage": "Manage", "label.manage": "Manage",
"label.manage.vpn.user": "Manage VPN Users", "label.manage.vpn.user": "Manage VPN Users",
"label.managed.instances": "Managed Instances", "label.managed.instances": "Managed Instances",
"label.managed.volumes": "Managed Volumes",
"label.managedstate": "Managed state", "label.managedstate": "Managed state",
"label.management": "Management", "label.management": "Management",
"label.management.ips": "Management IP addresses", "label.management.ips": "Management IP addresses",
@ -2160,8 +2167,10 @@
"label.unlimited": "Unlimited", "label.unlimited": "Unlimited",
"label.unmanaged": "Unmanaged", "label.unmanaged": "Unmanaged",
"label.unmanage.instance": "Unmanage Instance", "label.unmanage.instance": "Unmanage Instance",
"label.unmanage.volume": "Unmanage Volume",
"label.unmanaged.instance": "Unmanaged Instance", "label.unmanaged.instance": "Unmanaged Instance",
"label.unmanaged.instances": "Unmanaged Instances", "label.unmanaged.instances": "Unmanaged Instances",
"label.unmanaged.volumes": "Unmanaged Volumes",
"label.untagged": "Untagged", "label.untagged": "Untagged",
"label.up": "Up", "label.up": "Up",
"label.updateinsequence": "Update in sequence", "label.updateinsequence": "Update in sequence",
@ -2483,6 +2492,8 @@
"message.action.unmanage.instance": "Please confirm that you want to unmanage the Instance.", "message.action.unmanage.instance": "Please confirm that you want to unmanage the Instance.",
"message.action.unmanage.instances": "Please confirm that you want to unmanage the Instances.", "message.action.unmanage.instances": "Please confirm that you want to unmanage the Instances.",
"message.action.unmanage.virtualmachine": "Please confirm that you want to unmanage the Instance.", "message.action.unmanage.virtualmachine": "Please confirm that you want to unmanage the Instance.",
"message.action.unmanage.volume": "Please confirm that you want to unmanage the Volume.",
"message.action.unmanage.volumes": "Please confirm that you want to unmanage the Volumes.",
"message.action.vmsnapshot.delete": "Please confirm that you want to delete this Instance Snapshot. <br>Please notice that the Instance will be paused before the Snapshot deletion, and resumed after deletion, if it runs on KVM.", "message.action.vmsnapshot.delete": "Please confirm that you want to delete this Instance Snapshot. <br>Please notice that the Instance will be paused before the Snapshot deletion, and resumed after deletion, if it runs on KVM.",
"message.activate.project": "Are you sure you want to activate this project?", "message.activate.project": "Are you sure you want to activate this project?",
"message.add.egress.rule.failed": "Adding new egress rule failed.", "message.add.egress.rule.failed": "Adding new egress rule failed.",
@ -2712,6 +2723,7 @@
"message.desc.import.ext.kvm.wizard": "Import libvirt domain from External KVM Host not managed by CloudStack", "message.desc.import.ext.kvm.wizard": "Import libvirt domain from External KVM Host not managed by CloudStack",
"message.desc.import.local.kvm.wizard": "Import QCOW2 image from Local Storage of selected KVM Host", "message.desc.import.local.kvm.wizard": "Import QCOW2 image from Local Storage of selected KVM Host",
"message.desc.import.shared.kvm.wizard": "Import QCOW2 image from selected Primary Storage Pool", "message.desc.import.shared.kvm.wizard": "Import QCOW2 image from selected Primary Storage Pool",
"message.desc.import.unmanage.volume": "Please choose a storage pool that you want to import or unmanage volumes. The storage pool should be in Up status. <br>This feature only supports KVM.",
"message.desc.importexportinstancewizard": "By choosing to manage an Instance, CloudStack takes over the orchestration of that Instance. Unmanaging an Instance removes CloudStack ability to manage it. In both cases, the Instance is left running and no changes are done to the VM on the hypervisor.<br><br>For KVM, managing a VM is an experimental feature.", "message.desc.importexportinstancewizard": "By choosing to manage an Instance, CloudStack takes over the orchestration of that Instance. Unmanaging an Instance removes CloudStack ability to manage it. In both cases, the Instance is left running and no changes are done to the VM on the hypervisor.<br><br>For KVM, managing a VM is an experimental feature.",
"message.desc.importmigratefromvmwarewizard": "By selecting an existing or external VMware Datacenter and an instance to import, CloudStack migrates the selected instance from VMware to KVM on a conversion host using virt-v2v and imports it into a KVM cluster", "message.desc.importmigratefromvmwarewizard": "By selecting an existing or external VMware Datacenter and an instance to import, CloudStack migrates the selected instance from VMware to KVM on a conversion host using virt-v2v and imports it into a KVM cluster",
"message.desc.primary.storage": "Each cluster must contain one or more primary storage servers. We will add the first one now. Primary storage contains the disk volumes for all the Instances running on hosts in the cluster. Use any standards-compliant protocol that is supported by the underlying hypervisor.", "message.desc.primary.storage": "Each cluster must contain one or more primary storage servers. We will add the first one now. Primary storage contains the disk volumes for all the Instances running on hosts in the cluster. Use any standards-compliant protocol that is supported by the underlying hypervisor.",
@ -2918,6 +2930,7 @@
"message.host.dedicated": "Host Dedicated", "message.host.dedicated": "Host Dedicated",
"message.host.dedication.released": "Host dedication released.", "message.host.dedication.released": "Host dedication released.",
"message.import.running.instance.warning": "The selected VM is powered-on on the VMware Datacenter. The recommended state to convert a VMware VM into KVM is powered-off after a graceful shutdown of the guest OS.", "message.import.running.instance.warning": "The selected VM is powered-on on the VMware Datacenter. The recommended state to convert a VMware VM into KVM is powered-off after a graceful shutdown of the guest OS.",
"message.import.volume": "Please specify the domain, account or project name. <br>If not set, the volume will be imported for the caller.",
"message.info.cloudian.console": "Cloudian Management Console should open in another window.", "message.info.cloudian.console": "Cloudian Management Console should open in another window.",
"message.installwizard.cloudstack.helptext.website": " * Project website:\t ", "message.installwizard.cloudstack.helptext.website": " * Project website:\t ",
"message.infra.setup.tungsten.description": "This zone must contain a Tungsten-Fabric provider because the isolation method is TF", "message.infra.setup.tungsten.description": "This zone must contain a Tungsten-Fabric provider because the isolation method is TF",
@ -3187,6 +3200,7 @@
"message.success.edit.rule": "Successfully edited rule", "message.success.edit.rule": "Successfully edited rule",
"message.success.enable.saml.auth": "Successfully enabled SAML Authorization", "message.success.enable.saml.auth": "Successfully enabled SAML Authorization",
"message.success.import.instance": "Successfully imported Instance", "message.success.import.instance": "Successfully imported Instance",
"message.success.import.volume": "Successfully imported Volume",
"message.success.migrate.volume": "Successfully migrated volume", "message.success.migrate.volume": "Successfully migrated volume",
"message.success.migrating": "Migration completed successfully for", "message.success.migrating": "Migration completed successfully for",
"message.success.migration": "Migration completed successfully", "message.success.migration": "Migration completed successfully",
@ -3218,6 +3232,7 @@
"message.success.resize.volume": "Successfully resized volume", "message.success.resize.volume": "Successfully resized volume",
"message.success.scale.kubernetes": "Successfully scaled Kubernetes cluster", "message.success.scale.kubernetes": "Successfully scaled Kubernetes cluster",
"message.success.unmanage.instance": "Successfully unmanaged Instance", "message.success.unmanage.instance": "Successfully unmanaged Instance",
"message.success.unmanage.volume": "Successfully unmanaged Volume",
"message.success.update.bucket": "Successfully updated bucket", "message.success.update.bucket": "Successfully updated bucket",
"message.success.update.condition": "Successfully updated condition", "message.success.update.condition": "Successfully updated condition",
"message.success.update.ipaddress": "Successfully updated IP address", "message.success.update.ipaddress": "Successfully updated IP address",
@ -3330,6 +3345,8 @@
"message.volume.state.uploadinprogress": "Volume upload is in progress.", "message.volume.state.uploadinprogress": "Volume upload is in progress.",
"message.volume.state.uploadop": "The volume upload operation is in progress and will be on secondary storage shortly.", "message.volume.state.uploadop": "The volume upload operation is in progress and will be on secondary storage shortly.",
"message.volume.state.primary.storage.suitability": "The suitability of a primary storage for a volume depends on the disk offering of the volume and on the virtual machine allocation (if the volume is attached to a virtual machine).", "message.volume.state.primary.storage.suitability": "The suitability of a primary storage for a volume depends on the disk offering of the volume and on the virtual machine allocation (if the volume is attached to a virtual machine).",
"message.volumes.managed": "Volumes controlled by CloudStack.",
"message.volumes.unmanaged": "Volumes not controlled by CloudStack.",
"message.vr.alert.upon.network.offering.creation.l2": "As virtual routers are not created for L2 Networks, the compute offering will not be used.", "message.vr.alert.upon.network.offering.creation.l2": "As virtual routers are not created for L2 Networks, the compute offering will not be used.",
"message.vr.alert.upon.network.offering.creation.others": "As none of the obligatory services for creating a virtual router (VPN, DHCP, DNS, Firewall, LB, UserData, SourceNat, StaticNat, PortForwarding) are enabled, the virtual router will not be created and the compute offering will not be used.", "message.vr.alert.upon.network.offering.creation.others": "As none of the obligatory services for creating a virtual router (VPN, DHCP, DNS, Firewall, LB, UserData, SourceNat, StaticNat, PortForwarding) are enabled, the virtual router will not be created and the compute offering will not be used.",
"message.warn.filetype": "jpg, jpeg, png, bmp and svg are the only supported image formats.", "message.warn.filetype": "jpg, jpeg, png, bmp and svg are the only supported image formats.",

View File

@ -68,6 +68,15 @@ export default {
resourceType: 'UserVm', resourceType: 'UserVm',
permission: ['listInfrastructure', 'listUnmanagedInstances'], permission: ['listInfrastructure', 'listUnmanagedInstances'],
component: () => import('@/views/tools/ManageInstances.vue') component: () => import('@/views/tools/ManageInstances.vue')
},
{
name: 'managevolumes',
title: 'label.action.import.unmanage.volumes',
icon: 'interaction-outlined',
docHelp: 'adminguide/virtual_machines.html#importing-and-unmanaging-volume',
resourceType: 'UserVm',
permission: ['listInfrastructure', 'listVolumesForImport'],
component: () => import('@/views/tools/ManageVolumes.vue')
} }
] ]
} }

File diff suppressed because it is too large