From b363fd49f70ac2092ebe6226a72a3d911dc99e1f Mon Sep 17 00:00:00 2001 From: dahn Date: Fri, 25 Jan 2019 13:05:13 +0100 Subject: [PATCH] Vmware offline migration (#2848) * - Offline VM and Volume migration on Vmware hypervisor hosts - Also add VM disk consolidation call on successful VM migrations * Fix indentation of marvin test file and reformat against PEP8 * * Fix few comment typos * Refactor debug messages to use String.format() when debug log level is enabled. * Send list of commands returned by hypervisor Guru instead of explicitly selecting the first one * Fix unhandled NPE during VM migration * Revert back to distinct event descriptions for VM to host or storage pool migration * Reformat test_primary_storage file against PEP-8 and Remove unused imports * Revert back the deprecation messages in the custom StringUtils class to favour the use of the ApacheUtils --- .../com/cloud/hypervisor/HypervisorGuru.java | 12 +- .../com/cloud/storage/VolumeApiService.java | 10 + .../apache/cloudstack/api/BaseAsyncCmd.java | 1 + .../api/command/admin/vm/MigrateVMCmd.java | 71 +- .../MigrateVirtualMachineWithVolumeCmd.java | 11 +- .../command/user/volume/MigrateVolumeCmd.java | 12 + .../api/command/test/UpdateRoleCmdTest.java | 0 .../agent/api/MigrateVmToPoolAnswer.java | 43 + .../agent/api/MigrateVmToPoolCommand.java | 70 ++ .../cloud/agent/api/UnregisterVMCommand.java | 7 +- .../api/storage/MigrateVolumeCommand.java | 18 +- .../api/storage/DataMotionStrategy.java | 17 + .../com/cloud/storage/StorageManager.java | 9 +- .../cloud/vm/VirtualMachineManagerImpl.java | 346 ++++++--- .../orchestration/VolumeOrchestrator.java | 27 + .../vm/VirtualMachineManagerImplTest.java | 52 +- .../storage/motion/DataMotionServiceImpl.java | 34 +- .../storage/volume/VolumeServiceImpl.java | 56 +- .../framework/jobs/AsyncJobManager.java | 2 + .../framework/jobs/dao/AsyncJobDao.java | 2 + .../framework/jobs/dao/AsyncJobDaoImpl.java | 19 + .../jobs/impl/AsyncJobManagerImpl.java | 5 + .../com/cloud/hypervisor/guru/VMwareGuru.java | 40 +- .../vmware/resource/VmwareResource.java | 340 +++++++- .../motion/VmwareStorageMotionStrategy.java | 211 ++++- .../VmwareStorageMotionStrategyTest.java | 66 +- .../java/com/cloud/api/ApiDispatcher.java | 10 +- .../main/java/com/cloud/api/ApiServer.java | 125 +-- .../java/com/cloud/configuration/Config.java | 10 - .../deploy/DeploymentPlanningManagerImpl.java | 3 +- .../cloud/hypervisor/HypervisorGuruBase.java | 5 + .../cloud/server/ManagementServerImpl.java | 31 +- .../com/cloud/storage/StorageManagerImpl.java | 7 +- .../cloud/storage/VolumeApiServiceImpl.java | 32 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 39 +- .../storage/VolumeApiServiceImplTest.java | 14 +- .../integration/smoke/test_primary_storage.py | 363 +++++---- test/integration/smoke/test_vm_life_cycle.py | 732 +++++++++++------- .../java/com/cloud/utils/StringUtils.java | 8 + .../vmware/mo/VirtualMachineMO.java | 17 + 40 files changed, 2119 insertions(+), 758 deletions(-) rename api/{test => src/test/java}/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java (100%) create mode 100644 core/src/main/java/com/cloud/agent/api/MigrateVmToPoolAnswer.java create mode 100644 core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java diff --git a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java index 45e19ee2674..da2c7d04eb3 100644 --- a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java +++ 
b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java @@ -19,6 +19,7 @@ package com.cloud.hypervisor; import java.util.List; import java.util.Map; +import com.cloud.storage.StoragePool; import org.apache.cloudstack.framework.config.ConfigKey; import com.cloud.agent.api.Command; @@ -32,7 +33,7 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; public interface HypervisorGuru extends Adapter { - static final ConfigKey VmwareFullClone = new ConfigKey("Advanced", Boolean.class, "vmware.create.full.clone", "true", + ConfigKey VmwareFullClone = new ConfigKey("Advanced", Boolean.class, "vmware.create.full.clone", "true", "If set to true, creates guest VMs as full clones on ESX", false); HypervisorType getHypervisorType(); @@ -84,4 +85,13 @@ public interface HypervisorGuru extends Adapter { List finalizeExpungeVolumes(VirtualMachine vm); Map getClusterSettings(long vmId); + + /** + * Will generate commands to migrate a vm to a pool. For now this will only work for stopped VMs on Vmware. + * + * @param vm the stopped vm to migrate + * @param destination the primary storage pool to migrate to + * @return a list of commands to perform for a successful migration + */ + List finalizeMigrate(VirtualMachine vm, StoragePool destination); } diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java index 91b0bc0712f..7b38a6b1af1 100644 --- a/api/src/main/java/com/cloud/storage/VolumeApiService.java +++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java @@ -29,11 +29,21 @@ import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; import org.apache.cloudstack.api.response.GetUploadParamsResponse; +import org.apache.cloudstack.framework.config.ConfigKey; import com.cloud.exception.ResourceAllocationException; import com.cloud.user.Account; public interface VolumeApiService { + + ConfigKey ConcurrentMigrationsThresholdPerDatastore = new ConfigKey("Advanced" + , Long.class + , "concurrent.migrations.per.target.datastore" + , "0" + , "Limits number of migrations that can be handled per datastore concurrently; default is 0 - unlimited" + , true // not sure if this is to be dynamic + , ConfigKey.Scope.Global); + /** * Creates the database object for a volume based on the given criteria * diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseAsyncCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseAsyncCmd.java index 8963415d8e3..1c3822c1057 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseAsyncCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseAsyncCmd.java @@ -27,6 +27,7 @@ public abstract class BaseAsyncCmd extends BaseCmd { public static final String ipAddressSyncObject = "ipaddress"; public static final String networkSyncObject = "network"; public static final String vpcSyncObject = "vpc"; + public static final String migrationSyncObject = "migration"; public static final String snapshotHostSyncObject = "snapshothost"; public static final String gslbSyncObject = "globalserverloadbalancer"; private static final Logger s_logger = Logger.getLogger(BaseAsyncCmd.class.getName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java index b5683e3f144..9f73ae586a0 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java @@ -43,10 +43,10 @@ import com.cloud.uservm.UserVm; import com.cloud.vm.VirtualMachine; @APICommand(name = "migrateVirtualMachine", - description = "Attempts Migration of a VM to a different host or Root volume of the vm to a different storage pool", + description = "Attempts Migration of a VM to a different host or Root volume of the vm to a different storage pool", responseObject = UserVmResponse.class, entityType = {VirtualMachine.class}, - requestHasSensitiveInfo = false, - responseHasSensitiveInfo = true) + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true) public class MigrateVMCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(MigrateVMCmd.class.getName()); @@ -57,24 +57,24 @@ public class MigrateVMCmd extends BaseAsyncCmd { ///////////////////////////////////////////////////// @Parameter(name = ApiConstants.HOST_ID, - type = CommandType.UUID, - entityType = HostResponse.class, - required = false, - description = "Destination Host ID to migrate VM to. Required for live migrating a VM from host to host") + type = CommandType.UUID, + entityType = HostResponse.class, + required = false, + description = "Destination Host ID to migrate VM to. Required for live migrating a VM from host to host") private Long hostId; @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, - type = CommandType.UUID, - entityType = UserVmResponse.class, - required = true, - description = "the ID of the virtual machine") + type = CommandType.UUID, + entityType = UserVmResponse.class, + required = true, + description = "the ID of the virtual machine") private Long virtualMachineId; @Parameter(name = ApiConstants.STORAGE_ID, - type = CommandType.UUID, - entityType = StoragePoolResponse.class, - required = false, - description = "Destination storage pool ID to migrate VM volumes to. Required for migrating the root disk volume") + type = CommandType.UUID, + entityType = StoragePoolResponse.class, + required = false, + description = "Destination storage pool ID to migrate VM volumes to. 
Required for migrating the root disk volume") private Long storageId; ///////////////////////////////////////////////////// @@ -119,13 +119,15 @@ public class MigrateVMCmd extends BaseAsyncCmd { @Override public String getEventDescription() { + String eventDescription; if (getHostId() != null) { - return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to host Id: " + this._uuidMgr.getUuid(Host.class, getHostId()); + eventDescription = String.format("Attempting to migrate VM id: %s to host Id: %s", getVirtualMachineId(), getHostId()); } else if (getStoragePoolId() != null) { - return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to storage pool Id: " + this._uuidMgr.getUuid(StoragePool.class, getStoragePoolId()); + eventDescription = String.format("Attempting to migrate VM id: %s to storage pool Id: %s", getVirtualMachineId(), getStoragePoolId()); } else { - return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()); + eventDescription = String.format("Attempting to migrate VM id: %s", getVirtualMachineId()); } + return eventDescription; } @Override @@ -152,16 +154,17 @@ public class MigrateVMCmd extends BaseAsyncCmd { if (destinationHost.getType() != Host.Type.Routing) { throw new InvalidParameterValueException("The specified host(" + destinationHost.getName() + ") is not suitable to migrate the VM, please specify another one"); } - CallContext.current().setEventDetails("VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + ((getHostId() != null) ? " to host Id: " + this._uuidMgr.getUuid(Host.class, getHostId()) : "" )); + CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to host Id: " + getHostId()); } + // OfflineMigration performed when this parameter is specified StoragePool destStoragePool = null; if (getStoragePoolId() != null) { destStoragePool = _storageService.getStoragePool(getStoragePoolId()); if (destStoragePool == null) { throw new InvalidParameterValueException("Unable to find the storage pool to migrate the VM"); } - CallContext.current().setEventDetails("VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to storage pool Id: " + this._uuidMgr.getUuid(StoragePool.class, getStoragePoolId())); + CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to storage pool Id: " + getStoragePoolId()); } try { @@ -172,7 +175,7 @@ public class MigrateVMCmd extends BaseAsyncCmd { migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool); } if (migratedVm != null) { - UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm)migratedVm).get(0); + UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm) migratedVm).get(0); response.setResponseName(getCommandName()); setResponseObject(response); } else { @@ -181,15 +184,27 @@ public class MigrateVMCmd extends BaseAsyncCmd { } catch (ResourceUnavailableException ex) { s_logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); - } catch (ConcurrentOperationException e) { - s_logger.warn("Exception: ", e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); - } catch (ManagementServerException e) { - s_logger.warn("Exception: ", e); - throw new 
ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); - } catch (VirtualMachineMigrationException e) { + } catch (VirtualMachineMigrationException | ConcurrentOperationException | ManagementServerException e) { s_logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } + + @Override + public String getSyncObjType() { + return (getSyncObjId() != null) ? BaseAsyncCmd.migrationSyncObject : null; + } + + @Override + public Long getSyncObjId() { + if (getStoragePoolId() != null) { + return getStoragePoolId(); + } + // OfflineVmwareMigrations: undocumented feature; + // OfflineVmwareMigrations: on implementing a maximum queue size for per storage migrations it seems counter intuitive for the user to not enforce it for hosts as well. + if (getHostId() != null) { + return getHostId(); + } + return null; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java index f9d01f6afc8..65d71cc1300 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java @@ -46,7 +46,7 @@ import com.cloud.vm.VirtualMachine; @APICommand(name = "migrateVirtualMachineWithVolume", description = "Attempts Migration of a VM with its volumes to a different host", - responseObject = UserVmResponse.class, entityType = {VirtualMachine.class}, + responseObject = UserVmResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd { @@ -147,6 +147,7 @@ public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd { } Host destinationHost = _resourceService.getHost(getHostId()); + // OfflineVmwareMigration: destination host would have to not be a required parameter for stopped VMs if (destinationHost == null) { throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id =" + getHostId()); } @@ -163,13 +164,7 @@ public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd { } catch (ResourceUnavailableException ex) { s_logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); - } catch (ConcurrentOperationException e) { - s_logger.warn("Exception: ", e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); - } catch (ManagementServerException e) { - s_logger.warn("Exception: ", e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); - } catch (VirtualMachineMigrationException e) { + } catch (ConcurrentOperationException | ManagementServerException | VirtualMachineMigrationException e) { s_logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java index 0e1f7e08a17..f5d5e8c86ee 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java @@ -120,4 +120,16 @@ public class MigrateVolumeCmd extends BaseAsyncCmd { } } 
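+ // OfflineVmwareMigrations: queue this job on the target storage pool; async jobs sharing a sync object are executed in sequence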
+ @Override + public String getSyncObjType() { + return (getSyncObjId() != null) ? BaseAsyncCmd.migrationSyncObject : null; + } + + @Override + public Long getSyncObjId() { + if (getStoragePoolId() != null) { + return getStoragePoolId(); + } + return null; + } } diff --git a/api/test/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java similarity index 100% rename from api/test/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java rename to api/src/test/java/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java diff --git a/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolAnswer.java b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolAnswer.java new file mode 100644 index 00000000000..bc9ae6fd082 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolAnswer.java @@ -0,0 +1,43 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +import org.apache.cloudstack.storage.to.VolumeObjectTO; + +import java.util.List; + +public class MigrateVmToPoolAnswer extends Answer { + + List volumeTos; + + public MigrateVmToPoolAnswer(MigrateVmToPoolCommand cmd, Exception ex) { + super(cmd, ex); + volumeTos = null; + } + + public MigrateVmToPoolAnswer(MigrateVmToPoolCommand cmd, List volumeTos) { + super(cmd, true, null); + this.volumeTos = volumeTos; + } + + public List getVolumeTos() { + return volumeTos; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java new file mode 100644 index 00000000000..91a911d7c18 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java @@ -0,0 +1,70 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +package com.cloud.agent.api; + +import com.cloud.agent.api.to.VolumeTO; + +import java.util.Collection; + +/** + * used to tell the agent to migrate a vm to a different primary storage pool. + * It is for now only implemented on Vmware and is supposed to work irrespective of whether the VM is started or not. + * + */ +public class MigrateVmToPoolCommand extends Command { + private Collection volumes; + private String vmName; + private String destinationPool; + private boolean executeInSequence = false; + + protected MigrateVmToPoolCommand() { + } + + /** + * + * @param vmName the name of the VM to migrate + * @param volumes used to supply feedback on vmware generated names + * @param destinationPool the primary storage pool to migrate the VM to + * @param executeInSequence + */ + public MigrateVmToPoolCommand(String vmName, Collection volumes, String destinationPool, boolean executeInSequence) { + this.vmName = vmName; + this.volumes = volumes; + this.destinationPool = destinationPool; + this.executeInSequence = executeInSequence; + } + + public Collection getVolumes() { + return volumes; + } + + public String getDestinationPool() { + return destinationPool; + } + + public String getVmName() { + return vmName; + } + + @Override + public boolean executeInSequence() { + return executeInSequence; + } + +} diff --git a/core/src/main/java/com/cloud/agent/api/UnregisterVMCommand.java b/core/src/main/java/com/cloud/agent/api/UnregisterVMCommand.java index 16eb4bac0e8..4c5f138a63c 100644 --- a/core/src/main/java/com/cloud/agent/api/UnregisterVMCommand.java +++ b/core/src/main/java/com/cloud/agent/api/UnregisterVMCommand.java @@ -22,14 +22,19 @@ package com.cloud.agent.api; public class UnregisterVMCommand extends Command { String vmName; boolean cleanupVmFiles = false; + boolean executeInSequence; public UnregisterVMCommand(String vmName) { + this(vmName, false); + } + public UnregisterVMCommand(String vmName, boolean executeInSequence) { this.vmName = vmName; + this.executeInSequence = executeInSequence; } @Override public boolean executeInSequence() { - return false; + return executeInSequence; } public String getVmName() { diff --git a/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java index e5838451dd0..9902a86fb89 100644 --- a/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java +++ b/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java @@ -31,6 +31,7 @@ public class MigrateVolumeCommand extends Command { long volumeId; String volumePath; StorageFilerTO pool; + StorageFilerTO sourcePool; String attachedVmName; Volume.Type volumeType; @@ -47,14 +48,17 @@ public class MigrateVolumeCommand extends Command { } public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool pool, String attachedVmName, Volume.Type volumeType, int timeout) { - this.volumeId = volumeId; - this.volumePath = volumePath; - this.pool = new StorageFilerTO(pool); + this(volumeId,volumePath,pool,timeout); this.attachedVmName = attachedVmName; this.volumeType = volumeType; this.setWait(timeout); } + public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool) { + this(volumeId,volumePath,targetPool, null, Volume.Type.UNKNOWN, -1); + this.sourcePool = new StorageFilerTO(sourcePool); + } + public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map srcDetails, Map destDetails, int timeout) { this.srcData = srcData; 
this.destData = destData; @@ -81,6 +85,14 @@ public class MigrateVolumeCommand extends Command { return pool; } + public StorageFilerTO getSourcePool() { + return sourcePool; + } + + public StorageFilerTO getTargetPool() { + return pool; + } + public String getAttachedVmName() { return attachedVmName; } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataMotionStrategy.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataMotionStrategy.java index 5d552c4013f..2afece483c6 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataMotionStrategy.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataMotionStrategy.java @@ -25,11 +25,28 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.host.Host; +/** + * Interface to query how to move data around and to commission the move + */ public interface DataMotionStrategy { + /** + * Reports whether this instance can do a move from source to destination + * @param srcData object to move + * @param destData location to move it to + * @return the expertise level with which this instance knows how to handle the move + */ StrategyPriority canHandle(DataObject srcData, DataObject destData); StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost); + /** + * Copy the source volume to its destination (on a host if not null) + * + * @param srcData volume to move + * @param destData volume description as intended after the move + * @param destHost if not null, destData should be reachable from here + * @param callback where to report completion or failure to + */ void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback); void copyAsync(Map volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback callback); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index a8a566ad755..c9c24d8ad73 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -106,7 +106,14 @@ public interface StorageManager extends StorageService { * @param poolId * @return comma separated list of tags */ - public String getStoragePoolTags(long poolId); + String getStoragePoolTags(long poolId); + + /** + * Returns a list of Strings with tags for the specified storage pool + * @param poolId + * @return list of tags for the specified storage pool + */ + List getStoragePoolTagList(long poolId); Answer sendToPool(long poolId, Command cmd) throws StorageUnavailableException; diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 7c4a2ef451e..7d218e226d5 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -41,6 +41,9 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; +import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin; +import 
org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -86,6 +89,7 @@ import com.cloud.agent.api.ClusterVMMetaDataSyncAnswer; import com.cloud.agent.api.ClusterVMMetaDataSyncCommand; import com.cloud.agent.api.Command; import com.cloud.agent.api.MigrateCommand; +import com.cloud.agent.api.MigrateVmToPoolAnswer; import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.PingRoutingCommand; import com.cloud.agent.api.PlugNicAnswer; @@ -138,10 +142,8 @@ import com.cloud.exception.AffinityConflictException; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ConnectionException; -import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.exception.InsufficientVirtualNetworkCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; @@ -171,10 +173,12 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeApiService; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; @@ -314,6 +318,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private VmWorkJobDao _workJobDao; @Inject private AsyncJobManager _jobMgr; + @Inject + private StorageManager storageMgr; VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); @@ -1820,14 +1826,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac protected boolean stateTransitTo(final VMInstanceVO vm, final VirtualMachine.Event e, final Long hostId, final String reservationId) throws NoTransitionException { // if there are active vm snapshots task, state change is not allowed - // Disable this hacking thing, VM snapshot task need to be managed by its orchestartion flow istelf instead of - // hacking it here at general VM manager - /* - if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) { - s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks"); - return false; - } - */ vm.setReservationId(reservationId); return _stateMachine.transitTo(vm, e, new Pair(vm.getHostId(), hostId), _vmDao); } @@ -1836,15 +1834,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac public boolean stateTransitTo(final VirtualMachine vm1, final VirtualMachine.Event e, final Long hostId) throws NoTransitionException { final VMInstanceVO vm = (VMInstanceVO)vm1; - /* - * Remove the hacking logic here. 
- // if there are active vm snapshots task, state change is not allowed - if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) { - s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks"); - return false; - } - */ - final State oldState = vm.getState(); if (oldState == State.Starting) { if (e == Event.OperationSucceeded) { @@ -1988,89 +1977,243 @@ private void orchestrateStorageMigration(final String vmUuid, final StoragePool destPool) { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); + preStorageMigrationStateCheck(destPool, vm); + + try { + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("Offline migration of %s vm %s with volumes", + vm.getHypervisorType().toString(), + vm.getInstanceName())); + } + + migrateThroughHypervisorOrStorage(destPool, vm); + + } catch (ConcurrentOperationException + | InsufficientCapacityException // possibly InsufficientVirtualNetworkCapacityException or InsufficientAddressCapacityException + | StorageUnavailableException e) { + String msg = String.format("Failed to migrate VM: %s", vmUuid); + s_logger.debug(msg); + throw new CloudRuntimeException(msg, e); + } finally { + try { + stateTransitTo(vm, Event.AgentReportStopped, null); + } catch (final NoTransitionException e) { + String anotherMessage = String.format("Failed to change vm state of VM: %s", vmUuid); + s_logger.debug(anotherMessage); + throw new CloudRuntimeException(anotherMessage, e); + } + } + } + + private Answer[] attemptHypervisorMigration(StoragePool destPool, VMInstanceVO vm) { + final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType()); + // OfflineVmwareMigration: in case of vmware call vcenter to do it for us. + // OfflineVmwareMigration: should we check the proximity of source and destination + // OfflineVmwareMigration: if we are in the same cluster/datacentre/pool or whatever? + // OfflineVmwareMigration: we are checking on success to optionally delete an old vm if we are not + List commandsToSend = hvGuru.finalizeMigrate(vm, destPool); + + Long hostId = vm.getHostId(); + // OfflineVmwareMigration: probably this is null when vm is stopped + if (hostId == null) { + hostId = vm.getLastHostId(); + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("host id is null, using last host id %d", hostId)); + } + } + + if (CollectionUtils.isNotEmpty(commandsToSend)) { + Commands commandsContainer = new Commands(Command.OnError.Stop); + commandsContainer.addCommands(commandsToSend); + try { + // OfflineVmwareMigration: change to the call back variety?
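+ // OfflineVmwareMigration: for now the send is synchronous; agent failures are rethrown below as CloudRuntimeException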
+ // OfflineVmwareMigration: getting a Long seq to be filled with _agentMgr.send(hostId, commandsContainer, this) + return _agentMgr.send(hostId, commandsContainer); + } catch (AgentUnavailableException | OperationTimedoutException e) { + throw new CloudRuntimeException(String.format("Failed to migrate VM: %s", vm.getUuid()), e); + } + } + return null; + } + + private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { + boolean isDebugEnabled = s_logger.isDebugEnabled(); + if (isDebugEnabled) { + String msg = String.format("cleaning up after hypervisor pool migration volumes for VM %s(%s) to pool %s(%s)", vm.getInstanceName(), vm.getUuid(), destPool.getName(), destPool.getUuid()); + s_logger.debug(msg); + } + setDestinationPoolAndReallocateNetwork(destPool, vm); + // OfflineVmwareMigration: don't set this to null or have another way to address the command; twice migrating will lead to an NPE + Long destPodId = destPool.getPodId(); + Long vmPodId = vm.getPodIdToDeployIn(); + if (destPodId == null || !destPodId.equals(vmPodId)) { + if (isDebugEnabled) { + String msg = String.format("resetting lastHost for VM %s(%s) as pod (%s) is no good.", vm.getInstanceName(), vm.getUuid(), destPodId); + s_logger.debug(msg); + } + + vm.setLastHostId(null); + vm.setPodIdToDeployIn(destPodId); + // OfflineVmwareMigration: a consecutive migration will probably fail (no host nor pod) + } // else keep last host set for this vm + markVolumesInPool(vm, destPool, hypervisorMigrationResults); + // OfflineVmwareMigration: deal with answers, if (hypervisorMigrationResults.length > 0) + // OfflineVmwareMigration: iterate over the volumes for data updates + } + + private void markVolumesInPool(VMInstanceVO vm, StoragePool destPool, Answer[] hypervisorMigrationResults) { + MigrateVmToPoolAnswer relevantAnswer = null; + for (Answer answer : hypervisorMigrationResults) { + if (s_logger.isTraceEnabled()) { + s_logger.trace(String.format("received a %s: %s", answer.getClass().getSimpleName(), answer)); + } + if (answer instanceof MigrateVmToPoolAnswer) { + relevantAnswer = (MigrateVmToPoolAnswer) answer; + } + } + if (relevantAnswer == null) { + throw new CloudRuntimeException("no relevant migration results found"); + } + List volumes = _volsDao.findUsableVolumesForInstance(vm.getId()); + if (s_logger.isDebugEnabled()) { + String msg = String.format("found %d volumes for VM %s(uuid:%s, id:%d)", volumes.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); + s_logger.debug(msg); + } + for (VolumeObjectTO result : relevantAnswer.getVolumeTos()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("updating volume (%d) with path '%s' on pool '%d'", result.getId(), result.getPath(), destPool.getId())); + } + VolumeVO volume = _volsDao.findById(result.getId()); + volume.setPath(result.getPath()); + volume.setPoolId(destPool.getId()); + _volsDao.update(volume.getId(), volume); + } + } + + private void migrateThroughHypervisorOrStorage(StoragePool destPool, VMInstanceVO vm) throws StorageUnavailableException, InsufficientCapacityException { + final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + final Long srcHostId = vm.getHostId() != null ? 
vm.getHostId() : vm.getLastHostId(); + final HostVO srcHost = _hostDao.findById(srcHostId); + final Long srcClusterId = srcHost.getClusterId(); + Answer[] hypervisorMigrationResults = attemptHypervisorMigration(destPool, vm); + boolean migrationResult = false; + if (hypervisorMigrationResults == null) { + // OfflineVmwareMigration: if the HypervisorGuru can't do it, let the volume manager take care of it. + migrationResult = volumeMgr.storageMigration(profile, destPool); + if (migrationResult) { + afterStorageMigrationCleanup(destPool, vm, srcHost, srcClusterId); + } else { + s_logger.debug("Storage migration failed"); + } + } else { + afterHypervisorMigrationCleanup(destPool, vm, srcHost, srcClusterId, hypervisorMigrationResults); + } + } + + private void preStorageMigrationStateCheck(StoragePool destPool, VMInstanceVO vm) { if (destPool == null) { throw new CloudRuntimeException("Unable to migrate vm: missing destination storage pool"); } + checkDestinationForTags(destPool, vm); try { - stateTransitTo(vm, VirtualMachine.Event.StorageMigrationRequested, null); + stateTransitTo(vm, Event.StorageMigrationRequested, null); } catch (final NoTransitionException e) { - s_logger.debug("Unable to migrate vm: " + e.toString()); - throw new CloudRuntimeException("Unable to migrate vm: " + e.toString()); + String msg = String.format("Unable to migrate vm: %s", vm.getUuid()); + s_logger.debug(msg); + throw new CloudRuntimeException(msg, e); } + } - final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); - boolean migrationResult = false; + private void checkDestinationForTags(StoragePool destPool, VMInstanceVO vm) { + List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); + // OfflineVmwareMigration: iterate over volumes + // OfflineVmwareMigration: get disk offering + List storageTags = storageMgr.getStoragePoolTagList(destPool.getId()); + for (Volume vol : vols) { + DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId()); + List volumeTags = StringUtils.csvTagsToList(diskOffering.getTags()); + if (!matches(volumeTags, storageTags)) { + String msg = String.format("destination pool '%s' with tags '%s' does not support the volume diskoffering for volume '%s' (tags: '%s')", + destPool.getName(), + StringUtils.listToCsvTags(storageTags), + vol.getName(), + StringUtils.listToCsvTags(volumeTags) + ); + throw new CloudRuntimeException(msg); + } + } + } + + static boolean matches(List volumeTags, List storagePoolTags) { + // OfflineVmwareMigration: commons collections 4 allows for Collections.containsAll(volumeTags, storagePoolTags); + boolean result = true; + if (volumeTags != null) { + for (String tag : volumeTags) { + // there is a volume tag, so the storage pool must have it too + if (storagePoolTags == null || !storagePoolTags.contains(tag)) { + result = false; + break; + } + } + } + return result; + } + + + private void afterStorageMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException { + setDestinationPoolAndReallocateNetwork(destPool, vm); + + // when starting the vm next time, don't look at last_host_id, only choose the host based on volume/storage pool + vm.setLastHostId(null); + vm.setPodIdToDeployIn(destPool.getPodId()); + + // If VM was cold migrated between clusters belonging to two different VMware DCs, + // unregister the VM from the source host and cleanup the associated VM files.
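+ // OfflineVmwareMigration: the DC comparison and the actual unregister happen below in afterStorageMigrationVmwareVMcleanup and removeStaleVmFromSource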
+ if (vm.getHypervisorType().equals(HypervisorType.VMware)) { + afterStorageMigrationVmwareVMcleanup(destPool, vm, srcHost, srcClusterId); + } + } + + private void setDestinationPoolAndReallocateNetwork(StoragePool destPool, VMInstanceVO vm) throws InsufficientCapacityException { + // if the vm is migrated to different pod in basic mode, need to reallocate ip + + if (destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) { + if (s_logger.isDebugEnabled()) { + String msg = String.format("as the pod for vm %s has changed we are reallocating its network", vm.getInstanceName()); + s_logger.debug(msg); + } + final DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null); + final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null); + _networkMgr.reallocate(vmProfile, plan); + } + } + + private void afterStorageMigrationVmwareVMcleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) { + // OfflineVmwareMigration: this should only happen on storage migration, else the guru would already have issued the command + final Long destClusterId = destPool.getClusterId(); + if (srcClusterId != null && destClusterId != null && !srcClusterId.equals(destClusterId)) { + final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId); + final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId); + if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) { + removeStaleVmFromSource(vm, srcHost); + } + } + } + + // OfflineVmwareMigration: on port forward refactor this to be done in two steps: + // OfflineVmwareMigration: command creation in the guru.migrate method + // OfflineVmwareMigration: sending up in the attemptHypervisorMigration with execute in sequence (responsibility of the guru) + private void removeStaleVmFromSource(VMInstanceVO vm, HostVO srcHost) { + s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() + + " from source host: " + srcHost.getId()); + final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName()); + uvc.setCleanupVmFiles(true); try { - migrationResult = volumeMgr.storageMigration(profile, destPool); - - if (migrationResult) { - //if the vm is migrated to different pod in basic mode, need to reallocate ip - - if (destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) { - final DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null); - final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null); - _networkMgr.reallocate(vmProfile, plan); - } - - //when start the vm next time, don;'t look at last_host_id, only choose the host based on volume/storage pool - vm.setLastHostId(null); - vm.setPodIdToDeployIn(destPool.getPodId()); - - // If VM was cold migrated between clusters belonging to two different VMware DCs, - // unregister the VM from the source host and cleanup the associated VM files. - if (vm.getHypervisorType().equals(HypervisorType.VMware)) { - Long srcClusterId = null; - Long srcHostId = vm.getHostId() != null ? 
vm.getHostId() : vm.getLastHostId(); - if (srcHostId != null) { - HostVO srcHost = _hostDao.findById(srcHostId); - srcClusterId = srcHost.getClusterId(); - } - - final Long destClusterId = destPool.getClusterId(); - if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) { - final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId); - final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId); - if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) { - s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() + - " from source host: " + srcHostId); - final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName()); - uvc.setCleanupVmFiles(true); - try { - _agentMgr.send(srcHostId, uvc); - } catch (final AgentUnavailableException | OperationTimedoutException e) { - throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHostId + - " after successfully migrating VM's storage across VMware Datacenters"); - } - } - } - } - - } else { - s_logger.debug("Storage migration failed"); - } - } catch (final ConcurrentOperationException e) { - s_logger.debug("Failed to migration: " + e.toString()); - throw new CloudRuntimeException("Failed to migration: " + e.toString()); - } catch (final InsufficientVirtualNetworkCapacityException e) { - s_logger.debug("Failed to migration: " + e.toString()); - throw new CloudRuntimeException("Failed to migration: " + e.toString()); - } catch (final InsufficientAddressCapacityException e) { - s_logger.debug("Failed to migration: " + e.toString()); - throw new CloudRuntimeException("Failed to migration: " + e.toString()); - } catch (final InsufficientCapacityException e) { - s_logger.debug("Failed to migration: " + e.toString()); - throw new CloudRuntimeException("Failed to migration: " + e.toString()); - } catch (final StorageUnavailableException e) { - s_logger.debug("Failed to migration: " + e.toString()); - throw new CloudRuntimeException("Failed to migration: " + e.toString()); - } finally { - try { - stateTransitTo(vm, VirtualMachine.Event.AgentReportStopped, null); - } catch (final NoTransitionException e) { - s_logger.debug("Failed to change vm state: " + e.toString()); - throw new CloudRuntimeException("Failed to change vm state: " + e.toString()); - } + _agentMgr.send(srcHost.getId(), uvc); + } catch (final Exception e) { + throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() + + " after successfully migrating VM's storage across VMware Datacenters"); } } @@ -4577,6 +4720,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); + Map volumeStorageMap = dest.getStorageForDisks(); + if (volumeStorageMap != null) { + for (Volume vol : volumeStorageMap.keySet()) { + checkConcurrentJobsPerDatastoreThreshhold(volumeStorageMap.get(vol)); + } + } + final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List pendingWorkJobs = _workJobDao.listPendingWorkJobs( @@ -4738,6 +4888,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return new VmJobVirtualMachineOutcome(workJob, vm.getId()); } + private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPool) { + final Long threshold = 
VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value(); + if (threshold != null && threshold > 0) { + long count = _jobMgr.countPendingJobs("\"storageid\":\"" + destPool.getUuid() + "\"", MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName()); + if (count > threshold) { + throw new CloudRuntimeException("Number of concurrent migration jobs per datastore exceeded the threshold: " + threshold.toString() + ". Please try again after some time."); + } + } + } + public Outcome migrateVmStorageThroughJobQueue( final String vmUuid, final StoragePool destPool) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 30f89fc8ab2..6e71864c447 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -30,6 +30,10 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.VolumeApiService; +import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; +import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin; +import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -953,10 +957,29 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } } + private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPool) { + final Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value(); + if (threshold != null && threshold > 0) { + long count = _jobMgr.countPendingJobs("\"storageid\":\"" + destPool.getUuid() + "\"", MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName()); + if (count > threshold) { + throw new CloudRuntimeException("Number of concurrent migration jobs per datastore exceeded the threshold: " + threshold.toString() + ". Please try again after some time."); + } + } + } + + @Override @DB public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException { VolumeInfo vol = volFactory.getVolume(volume.getId()); + if (vol == null) { + throw new CloudRuntimeException("Migrate volume failed because volume object of volume " + volume.getName() + " is null"); + } + if (destPool == null) { + throw new CloudRuntimeException("Migrate volume failed because destination storage pool is not available"); + } + + checkConcurrentJobsPerDatastoreThreshhold(destPool); DataStore dataStoreTarget = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary); AsyncCallFuture future = volService.copyVolume(vol, dataStoreTarget); @@ -1062,6 +1085,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati return true; } + // OfflineVmwareMigration: in case we can (vmware?) 
don't iterate over volumes but tell the hypervisor to do the thing + if (s_logger.isDebugEnabled()) { + s_logger.debug("Offline vm migration was not done up the stack in VirtualMachineManager so trying here."); + } for (Volume vol : volumesNeedToMigrate) { Volume result = migrateVolume(vol, destPool); if (result == null) { diff --git a/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java b/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java index 6b8d95603ac..0e7579ea5fd 100644 --- a/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java +++ b/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java @@ -17,6 +17,7 @@ package com.cloud.vm; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; @@ -25,6 +26,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -178,7 +180,7 @@ public class VirtualMachineManagerImplTest { boolean actual = virtualMachineManagerImpl.sendStop(guru, profile, false, false); - Assert.assertFalse(actual); + assertFalse(actual); } @Test @@ -192,7 +194,7 @@ public class VirtualMachineManagerImplTest { boolean actual = virtualMachineManagerImpl.sendStop(guru, profile, false, false); - Assert.assertFalse(actual); + assertFalse(actual); } @Test @@ -242,7 +244,7 @@ public class VirtualMachineManagerImplTest { boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock); - Assert.assertFalse(returnedValue); + assertFalse(returnedValue); } @Test @@ -253,7 +255,7 @@ public class VirtualMachineManagerImplTest { boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock); - Assert.assertFalse(returnedValue); + assertFalse(returnedValue); } @Test @@ -317,7 +319,7 @@ public class VirtualMachineManagerImplTest { Map volumeToPoolObjectMap = virtualMachineManagerImpl.buildMapUsingUserInformation(virtualMachineProfileMock, hostMock, userDefinedVolumeToStoragePoolMap); - Assert.assertFalse(volumeToPoolObjectMap.isEmpty()); + assertFalse(volumeToPoolObjectMap.isEmpty()); Assert.assertEquals(storagePoolVoMock, volumeToPoolObjectMap.get(volumeVoMock)); Mockito.verify(userDefinedVolumeToStoragePoolMap, times(1)).keySet(); @@ -501,7 +503,7 @@ public class VirtualMachineManagerImplTest { HashMap volumeToPoolObjectMap = new HashMap<>(); virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); - Assert.assertFalse(volumeToPoolObjectMap.isEmpty()); + assertFalse(volumeToPoolObjectMap.isEmpty()); Assert.assertEquals(storagePoolMockOther, volumeToPoolObjectMap.get(volumeVoMock)); } @@ -558,7 +560,7 @@ public class VirtualMachineManagerImplTest { virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, allVolumes); - Assert.assertFalse(volumeToPoolObjectMap.isEmpty()); + assertFalse(volumeToPoolObjectMap.isEmpty()); Assert.assertEquals(storagePoolVoMock, volumeToPoolObjectMap.get(volumeVoMock)); Mockito.verify(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock); @@ 
-587,4 +589,38 @@ public class VirtualMachineManagerImplTest { inOrder.verify(virtualMachineManagerImpl).findVolumesThatWereNotMappedByTheUser(virtualMachineProfileMock, volumeToPoolObjectMap); inOrder.verify(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumesNotMapped); } -} \ No newline at end of file + + @Test + public void matchesOfSorts() { + List nothing = null; + List empty = new ArrayList<>(); + List tag = Arrays.asList("bla"); + List tags = Arrays.asList("bla", "blob"); + List others = Arrays.asList("bla", "blieb"); + List three = Arrays.asList("bla", "blob", "blieb"); + + // single match + assertTrue(VirtualMachineManagerImpl.matches(tag,tags)); + assertTrue(VirtualMachineManagerImpl.matches(tag,others)); + + // no requirements + assertTrue(VirtualMachineManagerImpl.matches(nothing,tags)); + assertTrue(VirtualMachineManagerImpl.matches(empty,tag)); + + // mis(sing)match + assertFalse(VirtualMachineManagerImpl.matches(tags,tag)); + assertFalse(VirtualMachineManagerImpl.matches(tag,nothing)); + assertFalse(VirtualMachineManagerImpl.matches(tag,empty)); + + // disjunct sets + assertFalse(VirtualMachineManagerImpl.matches(tags,others)); + assertFalse(VirtualMachineManagerImpl.matches(others,tags)); + + // everything matches the larger set + assertTrue(VirtualMachineManagerImpl.matches(nothing,three)); + assertTrue(VirtualMachineManagerImpl.matches(empty,three)); + assertTrue(VirtualMachineManagerImpl.matches(tag,three)); + assertTrue(VirtualMachineManagerImpl.matches(tags,three)); + assertTrue(VirtualMachineManagerImpl.matches(others,three)); + } +} diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java index eed1e08005f..c2724e64824 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java @@ -18,12 +18,17 @@ */ package org.apache.cloudstack.storage.motion; +import java.util.Date; import java.util.LinkedList; import java.util.List; import java.util.Map; import javax.inject.Inject; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -40,10 +45,15 @@ import com.cloud.host.Host; import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; + @Component public class DataMotionServiceImpl implements DataMotionService { + private static final Logger LOGGER = Logger.getLogger(DataMotionServiceImpl.class); + @Inject StorageStrategyFactory storageStrategyFactory; + @Inject + VolumeDao volDao; @Override public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { @@ -61,13 +71,33 @@ public class DataMotionServiceImpl implements DataMotionService { DataMotionStrategy strategy = storageStrategyFactory.getDataMotionStrategy(srcData, destData); if (strategy == null) { + // OfflineVmware volume migration + // Cleanup volumes from target and reset the state of volume at source + cleanUpVolumesForFailedMigrations(srcData, destData); throw new CloudRuntimeException("Can't find strategy to move 
data. " + "Source: " + srcData.getType().name() + " '" + srcData.getUuid() + ", Destination: " + - destData.getType().name() + " '" + destData.getUuid() + "'"); + destData.getType().name() + " '" + destData.getUuid() + "'"); } strategy.copyAsync(srcData, destData, destHost, callback); } + /** + * Offline Vmware volume migration + * Cleanup volumes after failed migrations and reset state of source volume + * + * @param srcData + * @param destData + */ + private void cleanUpVolumesForFailedMigrations(DataObject srcData, DataObject destData) { + VolumeVO destinationVO = volDao.findById(destData.getId()); + VolumeVO sourceVO = volDao.findById(srcData.getId()); + sourceVO.setState(Volume.State.Ready); + volDao.update(sourceVO.getId(), sourceVO); + destinationVO.setState(Volume.State.Expunged); + destinationVO.setRemoved(new Date()); + volDao.update(destinationVO.getId(), destinationVO); + } + @Override public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { copyAsync(srcData, destData, null, callback); @@ -84,7 +114,7 @@ public class DataMotionServiceImpl implements DataMotionService { } throw new CloudRuntimeException("Can't find strategy to move data. " + "Source Host: " + srcHost.getName() + ", Destination Host: " + destHost.getName() + - ", Volume UUIDs: " + StringUtils.join(volumeIds, ",")); + ", Volume UUIDs: " + StringUtils.join(volumeIds, ",")); } strategy.copyAsync(volumeMap, vmTo, srcHost, destHost, callback); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index d2979f7415d..8ff0bd20815 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1408,6 +1408,19 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataStore destStore) { + if (s_logger.isDebugEnabled()) { + DataStore srcStore = srcVolume.getDataStore(); + String srcRole = (srcStore != null && srcStore.getRole() != null ? srcVolume.getDataStore().getRole().toString() : ""); + + String msg = String.format("copying %s(id=%d, role=%s) to %s (id=%d, role=%s)" + , srcVolume.getName() + , srcVolume.getId() + , srcRole + , destStore.getName() + , destStore.getId() + , destStore.getRole()); + s_logger.debug(msg); + } if (srcVolume.getState() == Volume.State.Uploaded) { return copyVolumeFromImageToPrimary(srcVolume, destStore); @@ -1417,6 +1430,8 @@ public class VolumeServiceImpl implements VolumeService { return copyVolumeFromPrimaryToImage(srcVolume, destStore); } + // OfflineVmwareMigration: aren't we missing secondary to secondary in this logic? 
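+ // OfflineVmwareMigration: primary to primary copy; set up the future and result, then let the data motion service do the copy and copyVolumeCallBack finish or clean up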
+ AsyncCallFuture future = new AsyncCallFuture(); VolumeApiResult res = new VolumeApiResult(srcVolume); try { @@ -1438,7 +1453,10 @@ public class VolumeServiceImpl implements VolumeService { caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcVolume, destVolume, caller); } catch (Exception e) { - s_logger.debug("Failed to copy volume" + e); + s_logger.error("Failed to copy volume:" + e); + if(s_logger.isDebugEnabled()) { + s_logger.debug("Failed to copy volume.", e); + } res.setResult(e.toString()); future.complete(res); } @@ -1461,27 +1479,25 @@ public class VolumeServiceImpl implements VolumeService { AsyncCallFuture destroyFuture = expungeVolumeAsync(destVolume); destroyFuture.get(); future.complete(res); - return null; - } - srcVolume.processEvent(Event.OperationSuccessed); - destVolume.processEvent(Event.MigrationCopySucceeded, result.getAnswer()); - volDao.updateUuid(srcVolume.getId(), destVolume.getId()); - _volumeStoreDao.updateVolumeId(srcVolume.getId(), destVolume.getId()); - try { - destroyVolume(srcVolume.getId()); - srcVolume = volFactory.getVolume(srcVolume.getId()); - AsyncCallFuture destroyFuture = expungeVolumeAsync(srcVolume); - // If volume destroy fails, this could be because of vdi is still in use state, so wait and retry. - if (destroyFuture.get().isFailed()) { - Thread.sleep(5 * 1000); - destroyFuture = expungeVolumeAsync(srcVolume); - destroyFuture.get(); + } else { + srcVolume.processEvent(Event.OperationSuccessed); + destVolume.processEvent(Event.MigrationCopySucceeded, result.getAnswer()); + volDao.updateUuid(srcVolume.getId(), destVolume.getId()); + try { + destroyVolume(srcVolume.getId()); + srcVolume = volFactory.getVolume(srcVolume.getId()); + AsyncCallFuture destroyFuture = expungeVolumeAsync(srcVolume); + // If volume destroy fails, this could be because of vdi is still in use state, so wait and retry. + if (destroyFuture.get().isFailed()) { + Thread.sleep(5 * 1000); + destroyFuture = expungeVolumeAsync(srcVolume); + destroyFuture.get(); + } + future.complete(res); + } catch (Exception e) { + s_logger.debug("failed to clean up volume on storage", e); } - future.complete(res); - } catch (Exception e) { - s_logger.debug("failed to clean up volume on storage", e); } - return null; } catch (Exception e) { s_logger.debug("Failed to process copy volume callback", e); res.setResult(e.toString()); diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/AsyncJobManager.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/AsyncJobManager.java index bce99d084a5..8542407524b 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/AsyncJobManager.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/AsyncJobManager.java @@ -131,4 +131,6 @@ public interface AsyncJobManager extends Manager { Object unmarshallResultObject(AsyncJob job); List findFailureAsyncJobs(String... cmds); + + long countPendingJobs(String havingInfo, String... 
cmds); } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDao.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDao.java index b2b685d07e1..2696e105cce 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDao.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDao.java @@ -44,4 +44,6 @@ public interface AsyncJobDao extends GenericDao { List getResetJobs(long msid); List getFailureJobsSinceLastMsStart(long msId, String... cmds); + + long countPendingJobs(String havingInfo, String... cmds); } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java index ef992ff5cc0..6ca698b7589 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.jobs.JobInfo; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @@ -46,6 +47,7 @@ public class AsyncJobDaoImpl extends GenericDaoBase implements private final SearchBuilder expiringUnfinishedAsyncJobSearch; private final SearchBuilder expiringCompletedAsyncJobSearch; private final SearchBuilder failureMsidAsyncJobSearch; + private final GenericSearchBuilder asyncJobTypeSearch; public AsyncJobDaoImpl() { pendingAsyncJobSearch = createSearchBuilder(); @@ -94,6 +96,13 @@ public class AsyncJobDaoImpl extends GenericDaoBase implements failureMsidAsyncJobSearch.and("job_cmd", failureMsidAsyncJobSearch.entity().getCmd(), Op.IN); failureMsidAsyncJobSearch.done(); + asyncJobTypeSearch = createSearchBuilder(Long.class); + asyncJobTypeSearch.select(null, SearchCriteria.Func.COUNT, asyncJobTypeSearch.entity().getId()); + asyncJobTypeSearch.and("job_info", asyncJobTypeSearch.entity().getCmdInfo(),Op.LIKE); + asyncJobTypeSearch.and("job_cmd", asyncJobTypeSearch.entity().getCmd(), Op.IN); + asyncJobTypeSearch.and("status", asyncJobTypeSearch.entity().getStatus(), SearchCriteria.Op.EQ); + asyncJobTypeSearch.done(); + } @Override @@ -227,4 +236,14 @@ public class AsyncJobDaoImpl extends GenericDaoBase implements sc.setParameters("job_cmd", (Object[])cmds); return listBy(sc); } + + @Override + public long countPendingJobs(String havingInfo, String... 
cmds) { + SearchCriteria sc = asyncJobTypeSearch.create(); + sc.setParameters("status", JobInfo.Status.IN_PROGRESS); + sc.setParameters("job_cmd", (Object[])cmds); + sc.setParameters("job_info", "%" + havingInfo + "%"); + List results = customSearch(sc, null); + return results.get(0); + } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java index 1845dbf8aa1..1be3eedaf23 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java @@ -1122,4 +1122,9 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, public List findFailureAsyncJobs(String... cmds) { return _jobDao.getFailureJobsSinceLastMsStart(getMsid(), cmds); } + + @Override + public long countPendingJobs(String havingInfo, String... cmds) { + return _jobDao.countPendingJobs(havingInfo, cmds); + } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index 81dfc33bb88..10c3feb2609 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -26,6 +26,11 @@ import java.util.UUID; import javax.inject.Inject; +import com.cloud.agent.api.MigrateVmToPoolCommand; +import com.cloud.agent.api.UnregisterVMCommand; +import com.cloud.agent.api.to.VolumeTO; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.storage.StoragePool; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; @@ -115,12 +120,14 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co @Inject private GuestOSDao _guestOsDao; @Inject - GuestOSHypervisorDao _guestOsHypervisorDao; + private GuestOSHypervisorDao _guestOsHypervisorDao; @Inject private HostDao _hostDao; @Inject private HostDetailsDao _hostDetailsDao; @Inject + private ClusterDetailsDao _clusterDetailsDao; + @Inject private CommandExecLogDao _cmdExecLogDao; @Inject private VmwareManager _vmwareMgr; @@ -640,4 +647,35 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co details.put(VmwareReserveMemory.key(), VmwareReserveMemory.valueIn(clusterId).toString()); return details; } + + @Override + public List finalizeMigrate(VirtualMachine vm, StoragePool destination) { + List commands = new ArrayList(); + + // OfflineVmwareMigration: specialised migration command + List volumes = _volumeDao.findByInstance(vm.getId()); + List vols = new ArrayList<>(); + for (Volume volume : volumes) { + VolumeTO vol = new VolumeTO(volume,destination); + vols.add(vol); + } + MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, destination.getUuid(), true); + commands.add(migrateVmToPoolCommand); + + // OfflineVmwareMigration: cleanup if needed + final Long destClusterId = destination.getClusterId(); + final Long srcClusterId = getClusterId(vm.getId()); + + if (srcClusterId != null && destClusterId != null && ! 
srcClusterId.equals(destClusterId)) {
+            final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
+            final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
+            if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
+                final UnregisterVMCommand unregisterVMCommand = new UnregisterVMCommand(vm.getInstanceName(), true);
+                unregisterVMCommand.setCleanupVmFiles(true);
+
+                commands.add(unregisterVMCommand);
+            }
+        }
+        return commands;
+    }
 }
diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index ee14077eeca..37d27c855bc 100644
--- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -43,8 +43,8 @@ import java.util.UUID;
 
 import javax.naming.ConfigurationException;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.math.NumberUtils;
+import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 import org.apache.log4j.NDC;
 import org.joda.time.Duration;
@@ -163,6 +163,8 @@ import com.cloud.agent.api.ManageSnapshotAnswer;
 import com.cloud.agent.api.ManageSnapshotCommand;
 import com.cloud.agent.api.MigrateAnswer;
 import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.MigrateVmToPoolAnswer;
+import com.cloud.agent.api.MigrateVmToPoolCommand;
 import com.cloud.agent.api.MigrateWithStorageAnswer;
 import com.cloud.agent.api.MigrateWithStorageCommand;
 import com.cloud.agent.api.ModifySshKeysCommand;
@@ -311,6 +313,7 @@ import com.cloud.vm.VmDetailConstants;
 
 public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer {
     private static final Logger s_logger = Logger.getLogger(VmwareResource.class);
+    public static final String VMDK_EXTENSION = ".vmdk";
 
     private static final Random RANDOM = new Random(System.nanoTime());
 
@@ -442,6 +445,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 answer = execute((PrepareForMigrationCommand)cmd);
             } else if (clz == MigrateCommand.class) {
                 answer = execute((MigrateCommand)cmd);
+            } else if (clz == MigrateVmToPoolCommand.class) {
+                answer = execute((MigrateVmToPoolCommand)cmd);
             } else if (clz == MigrateWithStorageCommand.class) {
                 answer = execute((MigrateWithStorageCommand)cmd);
             } else if (clz == MigrateVolumeCommand.class) {
@@ -699,30 +704,38 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
 
         if (vmName.equalsIgnoreCase("none")) {
+            // OfflineVmwareMigration: we need to refactor the worker vm creation out for use in migration methods as well as here
+            // OfflineVmwareMigration: this method is 100 lines and needs refactoring anyway
            // we need to spawn a worker VM to attach the volume to and resize the volume.
            useWorkerVm = true;
            vmName = getWorkerName(getServiceContext(), cmd, 0);
 
            String poolId = cmd.getPoolUuid();
+            // OfflineVmwareMigration: refactor for re-use
+            // OfflineVmwareMigration: 1. find data(store)
            ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
            DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
 
            s_logger.info("Create worker VM " + vmName);
+            // OfflineVmwareMigration: 2.
create the worker with access to the data(store) vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName); if (vmMo == null) { + // OfflineVmwareMigration: don't throw a general Exception but think of a specific one throw new Exception("Unable to create a worker VM for volume resize"); } synchronized (this) { - vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + ".vmdk"); + // OfflineVmwareMigration: 3. attach the disk to the worker + vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + VMDK_EXTENSION); vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS); } } + // OfflineVmwareMigration: 4. find the (worker-) VM // find VM through datacenter (VM is not at the target host yet) vmMo = hyperHost.findVmOnPeerHyperHost(vmName); @@ -734,6 +747,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa throw new Exception(msg); } + // OfflineVmwareMigration: 5. ignore/replace the rest of the try-block; It is the functional bit Pair vdisk = vmMo.getDiskDevice(path); if (vdisk == null) { @@ -813,6 +827,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new ResizeVolumeAnswer(cmd, false, error); } finally { + // OfflineVmwareMigration: 6. check if a worker was used and destroy it if needed try { if (useWorkerVm) { s_logger.info("Destroy worker VM after volume resize"); @@ -2313,7 +2328,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } private void resizeRootDiskOnVMStart(VirtualMachineMO vmMo, DiskTO rootDiskTO, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { - final Pair vdisk = getVirtualDiskInfo(vmMo, appendFileType(rootDiskTO.getPath(), ".vmdk")); + final Pair vdisk = getVirtualDiskInfo(vmMo, appendFileType(rootDiskTO.getPath(), VMDK_EXTENSION)); assert(vdisk != null); Long reqSize = 0L; @@ -2536,7 +2551,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmdkPath = dsMo.getName(); } - datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + ".vmdk"); + datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + VMDK_EXTENSION); } } else { datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getPath(), VmwareManager.s_vmwareSearchExcludeFolder.value()); @@ -3061,7 +3076,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * Ex. 
"[-iqn.2010-01.com.solidfire:4nhe.vol-1.27-0] i-2-18-VM/ROOT-18.vmdk" should return "i-2-18-VM/ROOT-18" */ public String getVmdkPath(String path) { - if (!com.cloud.utils.StringUtils.isNotBlank(path)) { + if (!StringUtils.isNotBlank(path)) { return null; } @@ -3075,7 +3090,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa path = path.substring(startIndex + search.length()); - final String search2 = ".vmdk"; + final String search2 = VMDK_EXTENSION; int endIndex = path.indexOf(search2); @@ -3128,10 +3143,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa final String datastoreVolumePath; if (vmdkPath != null) { - datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + ".vmdk"); + datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + VMDK_EXTENSION); } else { - datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + VMDK_EXTENSION); } volumeTO.setPath(datastoreVolumePath); @@ -3780,12 +3795,172 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa invalidateServiceContext(); } - String msg = "Unexcpeted exception " + VmwareHelper.getExceptionMessage(e); + String msg = "Unexpected exception " + VmwareHelper.getExceptionMessage(e); s_logger.error(msg, e); return new PrepareForMigrationAnswer(cmd, msg); } } + protected Answer execute(MigrateVmToPoolCommand cmd) { + if (s_logger.isInfoEnabled()) { + s_logger.info(String.format("excuting MigrateVmToPoolCommand %s -> %s", cmd.getVmName(), cmd.getDestinationPool())); + if (s_logger.isDebugEnabled()) { + s_logger.debug("MigrateVmToPoolCommand: " + _gson.toJson(cmd)); + } + } + + final String vmName = cmd.getVmName(); + + VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); + try { + VirtualMachineMO vmMo = getVirtualMachineMO(vmName, hyperHost); + if (vmMo == null) { + String msg = "VM " + vmName + " does not exist in VMware datacenter"; + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } + + String poolUuid = cmd.getDestinationPool(); + return migrateAndAnswer(vmMo, poolUuid, hyperHost, cmd); + } catch (Throwable e) { // hopefully only CloudRuntimeException :/ + if (e instanceof Exception) { + return new Answer(cmd, (Exception) e); + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("problem" , e); + } + s_logger.error(e.getLocalizedMessage()); + return new Answer(cmd, false, "unknown problem: " + e.getLocalizedMessage()); + } + } + + private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHypervisorHost hyperHost, Command cmd) throws Exception { + ManagedObjectReference morDs = getTargetDatastoreMOReference(poolUuid, hyperHost); + + try { + // OfflineVmwareMigration: getVolumesFromCommand(cmd); + Map volumeDeviceKey = getVolumesFromCommand(vmMo, cmd); + if (s_logger.isTraceEnabled()) { + for (Integer diskId: volumeDeviceKey.keySet()) { + s_logger.trace(String.format("disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId))); + } + } + if (vmMo.changeDatastore(morDs)) { + // OfflineVmwareMigration: create target specification to include in answer + // Consolidate VM disks after successful VM migration + // In case of a linked clone VM, if VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies. + if (!vmMo.consolidateVmDisks()) { + s_logger.warn("VM disk consolidation failed after storage migration. 
Yet proceeding with VM migration."); + } else { + s_logger.debug("Successfully consolidated disks of VM " + vmMo.getVmName() + "."); + } + return createAnswerForCmd(vmMo, poolUuid, cmd, volumeDeviceKey); + } else { + return new Answer(cmd, false, "failed to changes data store for VM" + vmMo.getVmName()); + } + } catch (Exception e) { + String msg = "change data store for VM " + vmMo.getVmName() + " failed"; + s_logger.error(msg + ": " + e.getLocalizedMessage()); + throw new CloudRuntimeException(msg,e); + } + } + + Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, Command cmd, Map volumeDeviceKey) throws Exception { + List volumeToList = new ArrayList<>(); + VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); + VirtualDisk[] disks = vmMo.getAllDiskDevice(); + Answer answer; + if (s_logger.isTraceEnabled()) { + s_logger.trace(String.format("creating answer for %s", cmd.getClass().getSimpleName())); + } + if (cmd instanceof MigrateVolumeCommand) { + if (disks.length == 1) { + String volumePath = vmMo.getVmdkFileBaseName(disks[0]); + return new MigrateVolumeAnswer(cmd, true, null, volumePath); + } + throw new CloudRuntimeException("not expecting more then one disk after migrate volume command"); + } else if (cmd instanceof MigrateVmToPoolCommand) { + for (VirtualDisk disk : disks) { + VolumeObjectTO newVol = new VolumeObjectTO(); + String newPath = vmMo.getVmdkFileBaseName(disk); + VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolUuid); + newVol.setId(volumeDeviceKey.get(disk.getKey())); + newVol.setPath(newPath); + newVol.setChainInfo(_gson.toJson(diskInfo)); + volumeToList.add(newVol); + } + return new MigrateVmToPoolAnswer((MigrateVmToPoolCommand)cmd, volumeToList); + } + return new Answer(cmd, false, null); + } + + private Map getVolumesFromCommand(VirtualMachineMO vmMo, Command cmd) throws Exception { + Map volumeDeviceKey = new HashMap(); + if (cmd instanceof MigrateVmToPoolCommand) { + MigrateVmToPoolCommand mcmd = (MigrateVmToPoolCommand)cmd; + for (VolumeTO volume : mcmd.getVolumes()) { + addVolumeDiskmapping(vmMo, volumeDeviceKey, volume.getPath(), volume.getId()); + } + } else if (cmd instanceof MigrateVolumeCommand) { + MigrateVolumeCommand mcmd = (MigrateVolumeCommand)cmd; + addVolumeDiskmapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId()); + } + return volumeDeviceKey; + } + + private void addVolumeDiskmapping(VirtualMachineMO vmMo, Map volumeDeviceKey, String volumePath, long volumeId) throws Exception { + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("locating disk for volume (%d) using path %s", volumeId, volumePath)); + } + Pair diskInfo = getVirtualDiskInfo(vmMo, volumePath + VMDK_EXTENSION); + String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first()); + if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) { + vmMo.updateAdapterTypeIfRequired(vmdkAbsFile); + } + int diskId = diskInfo.first().getKey(); + volumeDeviceKey.put(diskId, volumeId); + } + + private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool, VmwareHypervisorHost hyperHost) { + ManagedObjectReference morDs; + try { + if(s_logger.isDebugEnabled()) { + s_logger.debug(String.format("finding datastore %s", destinationPool)); + } + morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, destinationPool); + } catch (Exception e) { + String msg = "exception while finding data store " + destinationPool; + s_logger.error(msg); + throw new 
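    // For orientation, a minimal sketch of how the volumeDeviceKey map built by
    // getVolumesFromCommand is consumed; the key and id values here are hypothetical
    // examples, not values produced by this code:
    //
    //     Map<Integer, Long> volumeDeviceKey = new HashMap<>();
    //     volumeDeviceKey.put(2000, 18L); // vSphere device key 2000 -> CloudStack volume id 18
    //     // createAnswerForCmd then walks vmMo.getAllDiskDevice(), resolves each disk via
    //     // volumeDeviceKey.get(disk.getKey()), and reports the new vmdk base name under
    //     // that volume id in the answer.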
+
+    private ManagedObjectReference getDataCenterMOReference(String vmName, VmwareHypervisorHost hyperHost) {
+        ManagedObjectReference morDc;
+        try {
+            morDc = hyperHost.getHyperHostDatacenter();
+        } catch (Exception e) {
+            String msg = "exception while finding VMware datacenter to search for VM " + vmName;
+            s_logger.error(msg);
+            throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
+        }
+        return morDc;
+    }
+
+    private VirtualMachineMO getVirtualMachineMO(String vmName, VmwareHypervisorHost hyperHost) {
+        VirtualMachineMO vmMo = null;
+        try {
+            // find VM through datacenter (VM is not at the target host yet)
+            vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
+        } catch (Exception e) {
+            String msg = "exception while searching for VM " + vmName + " in VMware datacenter";
+            s_logger.error(msg);
+            throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
+        }
+        return vmMo;
+    }
+
     protected Answer execute(MigrateCommand cmd) {
         if (s_logger.isInfoEnabled()) {
             s_logger.info("Executing resource MigrateCommand: " + _gson.toJson(cmd));
@@ -3946,7 +4121,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                     }
                     diskLocator = new VirtualMachineRelocateSpecDiskLocator();
                     diskLocator.setDatastore(morDsAtSource);
-                    Pair diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volume.getPath(), ".vmdk"));
+                    Pair diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volume.getPath(), VMDK_EXTENSION));
                     String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
                     if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
                         vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
@@ -4074,6 +4249,141 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
     }
 
+    private Answer migrateVolume(MigrateVolumeCommand cmd) {
+        Answer answer = null;
+        String path = cmd.getVolumePath();
+
+        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
+        VirtualMachineMO vmMo = null;
+        DatastoreMO dsMo = null;
+        ManagedObjectReference morSourceDS = null;
+        String vmdkDataStorePath = null;
+
+        String vmName = null;
+        try {
+            // OfflineVmwareMigration: we need to refactor the worker vm creation out for use in migration methods as well as here
+            // OfflineVmwareMigration: this method is 100 lines and needs refactoring anyway
+            // we need to spawn a worker VM to attach the volume to and move it
+            vmName = getWorkerName(getServiceContext(), cmd, 0);
+
+            // OfflineVmwareMigration: refactor for re-use
+            // OfflineVmwareMigration: 1. find data(store)
+            // OfflineVmwareMigration: more robust would be to find the store given the volume as it might have been moved out of band or due to error
+//            example: DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
+            morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid());
+            dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
+            s_logger.info("Create worker VM " + vmName);
+            // OfflineVmwareMigration: 2. create the worker with access to the data(store)
+            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName);
+            if (vmMo == null) {
+                // OfflineVmwareMigration: don't throw a general Exception but think of a specific one
+                throw new CloudRuntimeException("Unable to create a worker VM for volume operation");
+            }
+
+            synchronized (this) {
+                // OfflineVmwareMigration: 3. attach the disk to the worker
+                String vmdkFileName = path + VMDK_EXTENSION;
+                vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName);
+                if (!dsMo.fileExists(vmdkDataStorePath)) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, path));
+                    }
+                    vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, path, vmdkFileName);
+                }
+                if (!dsMo.fileExists(vmdkDataStorePath)) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, vmName));
+                    }
+                    vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkFileName);
+                }
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug(String.format("attaching %s to %s for migration", vmdkDataStorePath, vmMo.getVmName()));
+                }
+                vmMo.attachDisk(new String[] { vmdkDataStorePath }, morSourceDS);
+            }
+
+            // OfflineVmwareMigration: 4. find the (worker-) VM
+            // find VM through datacenter (VM is not at the target host yet)
+            vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
+            if (vmMo == null) {
+                String msg = "VM " + vmName + " does not exist in VMware datacenter";
+                s_logger.error(msg);
+                throw new Exception(msg);
+            }
+
+            if (s_logger.isTraceEnabled()) {
+                VirtualDisk[] disks = vmMo.getAllDiskDevice();
+                String format = "disk %d is attached as %s";
+                for (VirtualDisk disk : disks) {
+                    s_logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk)));
+                }
+            }
+
+            // OfflineVmwareMigration: 5. create a relocate spec and perform
+            Pair vdisk = vmMo.getDiskDevice(path);
+            if (vdisk == null) {
+                if (s_logger.isTraceEnabled())
+                    s_logger.trace("migrate volume done (failed)");
+                throw new CloudRuntimeException("No such disk device: " + path);
+            }
+
+            VirtualDisk disk = vdisk.first();
+            String vmdkAbsFile = getAbsoluteVmdkFile(disk);
+            if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
+                vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
+            }
+
+            // OfflineVmwareMigration: this may have to be dissected and executed in separate steps
+            answer = migrateAndAnswer(vmMo, cmd.getTargetPool().getUuid(), hyperHost, cmd);
+        } catch (Exception e) {
+            String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage());
+            s_logger.error(msg, e);
+            answer = new Answer(cmd, false, msg);
+        } finally {
+            try {
+                // OfflineVmwareMigration: worker *may* have been renamed
+                vmName = vmMo.getVmName();
+                morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getTargetPool().getUuid());
+                dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
+                s_logger.info("Detaching disks before destroying worker VM '" + vmName + "' after volume migration");
+                VirtualDisk[] disks = vmMo.getAllDiskDevice();
+                String format = "disk %d was migrated to %s";
+                for (VirtualDisk disk : disks) {
+                    if (s_logger.isTraceEnabled()) {
+                        s_logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk)));
+                    }
+                    vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmMo.getVmdkFileBaseName(disk) + VMDK_EXTENSION);
+                    vmMo.detachDisk(vmdkDataStorePath, false);
+                }
+                s_logger.info("Destroy worker VM '" + vmName + "' after volume migration");
+                vmMo.destroy();
+            } catch (Throwable e) {
+                s_logger.info("Failed to destroy worker VM: " + vmName);
+            }
+        }
+        if (answer instanceof MigrateVolumeAnswer) {
+            String newPath = ((MigrateVolumeAnswer)answer).getVolumePath();
+            String vmdkFileName = newPath + VMDK_EXTENSION;
+            try {
+                VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, newPath, vmName);
+                vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName);
+
+                if (!dsMo.fileExists(vmdkDataStorePath)) {
+                    String msg = String.format("Migration of volume '%s' failed; file (%s) not found as path '%s'", cmd.getVolumePath(), vmdkFileName, vmdkDataStorePath);
+                    s_logger.error(msg);
+                    answer = new Answer(cmd, false, msg);
+                }
+            } catch (Exception e) {
+                String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage());
+                s_logger.error(msg, e);
+                answer = new Answer(cmd, false, msg);
+            }
+        }
+        return answer;
+    }
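    // Condensed sketch of the worker-VM lifecycle migrateVolume implements above, happy
    // path only; targetPoolUuid abbreviates cmd.getTargetPool().getUuid(), and the error
    // handling and datastore path probing are elided:
    //
    //     String worker = getWorkerName(getServiceContext(), cmd, 0);
    //     VirtualMachineMO workerVm = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, worker);
    //     workerVm.attachDisk(new String[] {vmdkDataStorePath}, morSourceDS);          // 1. attach
    //     Answer answer = migrateAndAnswer(workerVm, targetPoolUuid, hyperHost, cmd);  // 2. relocate
    //     workerVm.detachDisk(vmdkDataStorePath, false);                               // 3. detach
    //     workerVm.destroy();                                                          // 4. clean up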
+
+    // OfflineVmwareMigration: refactor to be able to handle a detached volume
     private Answer execute(MigrateVolumeCommand cmd) {
         String volumePath = cmd.getVolumePath();
         StorageFilerTO poolTo = cmd.getPool();
@@ -4087,6 +4397,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         VirtualMachineMO vmMo = null;
         VmwareHypervisorHost srcHyperHost = null;
 
+        // OfflineVmwareMigration: if host is null ???
+        if (org.apache.commons.lang.StringUtils.isBlank(cmd.getAttachedVmName())) {
+            return migrateVolume(cmd);
+        }
         ManagedObjectReference morDs = null;
         ManagedObjectReference morDc = null;
         VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
@@ -4107,7 +4421,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             if (vmMo == null) {
                 String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue();
                 s_logger.error(msg);
-                throw new Exception(msg);
+                throw new CloudRuntimeException(msg);
             }
             vmName = vmMo.getName();
             morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName);
@@ -4119,8 +4433,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             }
 
             DatastoreMO targetDsMo = new DatastoreMO(srcHyperHost.getContext(), morDs);
-            String fullVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(targetDsMo, vmName, volumePath + ".vmdk");
-            Pair diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volumePath, ".vmdk"));
+            String fullVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(targetDsMo, vmName, volumePath + VMDK_EXTENSION);
+            Pair diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volumePath, VMDK_EXTENSION));
             String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
             if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
                 vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
diff --git a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
index 23b32a3d255..2463e75c01d 100644
--- a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
+++ b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
@@ -20,11 +20,39 @@
 package org.apache.cloudstack.storage.motion;
 
 import java.util.ArrayList;
+import java.util.Date;
 import java.util.List;
 import java.util.Map;
 
 import javax.inject.Inject;
 
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import
com.cloud.agent.api.MigrateWithStorageAnswer; +import com.cloud.agent.api.MigrateWithStorageCommand; +import com.cloud.agent.api.storage.MigrateVolumeAnswer; +import com.cloud.agent.api.storage.MigrateVolumeCommand; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.agent.api.to.VolumeTO; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ScopeType; +import com.cloud.storage.StoragePool; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -38,25 +66,6 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.MigrateWithStorageAnswer; -import com.cloud.agent.api.MigrateWithStorageCommand; -import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.agent.api.to.VirtualMachineTO; -import com.cloud.agent.api.to.VolumeTO; -import com.cloud.exception.AgentUnavailableException; -import com.cloud.exception.OperationTimedoutException; -import com.cloud.host.Host; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.StoragePool; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.utils.Pair; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.dao.VMInstanceDao; - @Component public class VmwareStorageMotionStrategy implements DataMotionStrategy { private static final Logger s_logger = Logger.getLogger(VmwareStorageMotionStrategy.class); @@ -70,12 +79,77 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy { PrimaryDataStoreDao storagePoolDao; @Inject VMInstanceDao instanceDao; + @Inject + private HostDao hostDao; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { + // OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same vmware-cluster and both are volumes + if (isOnVmware(srcData, destData) + && isOnPrimary(srcData, destData) + && isVolumesOnly(srcData, destData) + && isDettached(srcData) + && isIntraCluster(srcData, destData) + && isStoreScopeEqual(srcData, destData)) { + if (s_logger.isDebugEnabled()) { + String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the VMware cluster %s (== %s)" + , this.getClass() + , srcData.getId() + , srcData.getUuid() + , destData.getId() + , destData.getUuid() + , storagePoolDao.findById(srcData.getDataStore().getId()).getClusterId() + , storagePoolDao.findById(destData.getDataStore().getId()).getClusterId()); + s_logger.debug(msg); + } + return 
StrategyPriority.HYPERVISOR; + } return StrategyPriority.CANT_HANDLE; } + private boolean isDettached(DataObject srcData) { + VolumeVO volume = volDao.findById(srcData.getId()); + return volume.getInstanceId() == null; + } + + private boolean isVolumesOnly(DataObject srcData, DataObject destData) { + return DataObjectType.VOLUME.equals(srcData.getType()) + && DataObjectType.VOLUME.equals(destData.getType()); + } + + private boolean isOnPrimary(DataObject srcData, DataObject destData) { + return DataStoreRole.Primary.equals(srcData.getDataStore().getRole()) + && DataStoreRole.Primary.equals(destData.getDataStore().getRole()); + } + + private boolean isOnVmware(DataObject srcData, DataObject destData) { + return HypervisorType.VMware.equals(srcData.getTO().getHypervisorType()) + && HypervisorType.VMware.equals(destData.getTO().getHypervisorType()); + } + + private boolean isIntraCluster(DataObject srcData, DataObject destData) { + DataStore srcStore = srcData.getDataStore(); + StoragePool srcPool = storagePoolDao.findById(srcStore.getId()); + DataStore destStore = destData.getDataStore(); + StoragePool destPool = storagePoolDao.findById(destStore.getId()); + return srcPool.getClusterId().equals(destPool.getClusterId()); + } + + /** + * Ensure that the scope of source and destination storage pools match + * + * @param srcData + * @param destData + * @return + */ + private boolean isStoreScopeEqual(DataObject srcData, DataObject destData) { + DataStore srcStore = srcData.getDataStore(); + DataStore destStore = destData.getDataStore(); + String msg = String.format("Storage scope of source pool is %s and of destination pool is %s", srcStore.getScope().toString(), destStore.getScope().toString()); + s_logger.debug(msg); + return srcStore.getScope().getScopeType() == (destStore.getScope().getScopeType()); + } + @Override public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { if (srcHost.getHypervisorType() == HypervisorType.VMware && destHost.getHypervisorType() == HypervisorType.VMware) { @@ -85,9 +159,96 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy { return StrategyPriority.CANT_HANDLE; } + /** + * the Vmware storageMotion strategy allows to copy to a destination pool but not to a destination host + * + * @param srcData volume to move + * @param destData volume description as intended after the move + * @param destHost null or else + * @param callback where to report completion or failure to + */ @Override public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { - throw new UnsupportedOperationException(); + if (destHost != null) { + String format = "%s cannot target a host in moving an object from {%s}\n to {%s}"; + String msg = String.format(format + , this.getClass().getName() + , srcData.toString() + , destData.toString() + ); + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } + // OfflineVmwareMigration: extract the destination pool from destData and construct a migrateVolume command + if (!isOnPrimary(srcData, destData)) { + // OfflineVmwareMigration: we shouldn't be here as we would have refused in the canHandle call + throw new UnsupportedOperationException(); + } + StoragePool sourcePool = (StoragePool) srcData.getDataStore(); + StoragePool targetPool = (StoragePool) destData.getDataStore(); + MigrateVolumeCommand cmd = new MigrateVolumeCommand(srcData.getId() + , srcData.getTO().getPath() + , sourcePool + , targetPool); + // OfflineVmwareMigration: should be 
((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding + Answer answer; + ScopeType scopeType = srcData.getDataStore().getScope().getScopeType(); + if (ScopeType.CLUSTER == scopeType) { + // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM + Long hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId()); + if (hostId == null) { + throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in cluster: " + sourcePool.getName()); + } + answer = agentMgr.easySend(hostId, cmd); + } else { + answer = agentMgr.sendTo(sourcePool.getDataCenterId(), HypervisorType.VMware, cmd); + } + updateVolumeAfterMigration(answer, srcData, destData); + CopyCommandResult result = new CopyCommandResult(null, answer); + callback.complete(result); + } + + /** + * Selects a host from the cluster housing the source storage pool + * Assumption is that Primary Storage is cluster-wide + *
+ * returns any host ID within the cluster if storage-pool is cluster-wide, and exception is thrown otherwise + * + * @param clusterId + * @return + */ + private Long findSuitableHostIdForWorkerVmPlacement(Long clusterId) { + List hostLists = hostDao.findByClusterId(clusterId); + Long hostId = null; + for (HostVO hostVO : hostLists) { + if (hostVO.getHypervisorType().equals(HypervisorType.VMware) && hostVO.getStatus() == Status.Up) { + hostId = hostVO.getId(); + break; + } + } + return hostId; + } + + private void updateVolumeAfterMigration(Answer answer, DataObject srcData, DataObject destData) { + VolumeVO destinationVO = volDao.findById(destData.getId()); + if (!(answer instanceof MigrateVolumeAnswer)) { + // OfflineVmwareMigration: reset states and such + VolumeVO sourceVO = volDao.findById(srcData.getId()); + sourceVO.setState(Volume.State.Ready); + volDao.update(sourceVO.getId(), sourceVO); + destinationVO.setState(Volume.State.Expunged); + destinationVO.setRemoved(new Date()); + volDao.update(destinationVO.getId(), destinationVO); + throw new CloudRuntimeException("unexpected answer from hypervisor agent: " + answer.getDetails()); + } + MigrateVolumeAnswer ans = (MigrateVolumeAnswer) answer; + if (s_logger.isDebugEnabled()) { + String format = "retrieved '%s' as new path for volume(%d)"; + s_logger.debug(String.format(format, ans.getVolumePath(), destData.getId())); + } + // OfflineVmwareMigration: update the volume with new pool/volume path + destinationVO.setPath(ans.getVolumePath()); + volDao.update(destinationVO.getId(), destinationVO); } @Override @@ -124,7 +285,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy { for (Map.Entry entry : volumeToPool.entrySet()) { VolumeInfo volume = entry.getKey(); VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId())); - StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue()); + StorageFilerTO filerTo = new StorageFilerTO((StoragePool) entry.getValue()); volumeToFilerto.add(new Pair(volumeTo, filerTo)); } @@ -133,7 +294,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy { // Run validations against target!! // 2. Complete the process. Update the volume details. 
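        // For illustration, each pair in volumeToFilerto couples a migrating volume with
        // its target primary storage; the example values below are hypothetical, not
        // produced by this code:
        //
        //     Pair p = volumeToFilerto.get(0);
        //     ((VolumeTO) p.first()).getPath();        // e.g. "ROOT-18", the vmdk base name to carry over
        //     ((StorageFilerTO) p.second()).getUuid(); // e.g. the uuid of target pool "pool-ssd-1"
        //
        // The MigrateWithStorageAnswer coming back carries VolumeObjectTOs that are matched
        // on volume id when the volume paths are updated after the migration.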
MigrateWithStorageCommand migrateWithStorageCmd = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid()); - MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer)agentMgr.send(srcHost.getId(), migrateWithStorageCmd); + MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), migrateWithStorageCmd); if (migrateWithStorageAnswer == null) { s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); @@ -162,12 +323,12 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy { for (Map.Entry entry : volumeToPool.entrySet()) { VolumeInfo volume = entry.getKey(); VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId())); - StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue()); + StorageFilerTO filerTo = new StorageFilerTO((StoragePool) entry.getValue()); volumeToFilerto.add(new Pair(volumeTo, filerTo)); } MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid()); - MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer)agentMgr.send(srcHost.getId(), command); + MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), command); if (answer == null) { s_logger.error("Migration with storage of vm " + vm + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); @@ -190,7 +351,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy { for (Map.Entry entry : volumeToPool.entrySet()) { boolean updated = false; VolumeInfo volume = entry.getKey(); - StoragePool pool = (StoragePool)entry.getValue(); + StoragePool pool = (StoragePool) entry.getValue(); for (VolumeObjectTO volumeTo : volumeTos) { if (volume.getId() == volumeTo.getId()) { VolumeVO volumeVO = volDao.findById(volume.getId()); diff --git a/plugins/hypervisors/vmware/src/test/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java b/plugins/hypervisors/vmware/src/test/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java index e3cc2f6056f..4cc3a77baaa 100644 --- a/plugins/hypervisors/vmware/src/test/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java +++ b/plugins/hypervisors/vmware/src/test/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java @@ -16,13 +16,6 @@ // under the License. 
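// A hypothetical regression test for the new offline canHandle path, sketched against the
// mocks this test class wires up below; the stubbing outlined here is an assumption, not
// code that exists in this patch:
//
//     @Test
//     public void testCanHandleDetachedVolumeOnPrimary() {
//         VolumeInfo src = mock(VolumeInfo.class);
//         VolumeInfo dest = mock(VolumeInfo.class);
//         // stub both as VMware volumes on primary storage in the same cluster, with no
//         // attached instance and equal store scope, then expect:
//         // assertTrue(strategy.canHandle(src, dest) == StrategyPriority.HYPERVISOR);
//     }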
package org.apache.cloudstack.storage.motion; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.isA; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -30,6 +23,29 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.MigrateWithStorageAnswer; +import com.cloud.agent.api.MigrateWithStorageCommand; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.host.Host; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcContext; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -47,29 +63,12 @@ import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.framework.async.AsyncCallFuture; -import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.framework.async.AsyncRpcContext; -import org.apache.cloudstack.storage.command.CommandResult; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.test.utils.SpringUtils; - -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.MigrateWithStorageAnswer; -import com.cloud.agent.api.MigrateWithStorageCommand; -import com.cloud.agent.api.to.VirtualMachineTO; -import com.cloud.host.Host; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.utils.component.ComponentContext; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.dao.VMInstanceDao; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyLong; +import static 
org.mockito.Matchers.isA; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(loader = AnnotationConfigContextLoader.class) @@ -87,6 +86,8 @@ public class VmwareStorageMotionStrategyTest { PrimaryDataStoreDao storagePoolDao; @Inject VMInstanceDao instanceDao; + @Inject + private HostDao hostDao; CopyCommandResult result; @@ -262,6 +263,11 @@ public class VmwareStorageMotionStrategyTest { return Mockito.mock(AgentManager.class); } + @Bean + public HostDao hostDao() { + return Mockito.mock(HostDao.class); + } + public static class Library implements TypeFilter { @Override public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { diff --git a/server/src/main/java/com/cloud/api/ApiDispatcher.java b/server/src/main/java/com/cloud/api/ApiDispatcher.java index 73755883697..11615ea3f54 100644 --- a/server/src/main/java/com/cloud/api/ApiDispatcher.java +++ b/server/src/main/java/com/cloud/api/ApiDispatcher.java @@ -49,6 +49,7 @@ public class ApiDispatcher { private static final Logger s_logger = Logger.getLogger(ApiDispatcher.class.getName()); Long _createSnapshotQueueSizeLimit; + Long migrateQueueSizeLimit; @Inject AsyncJobManager _asyncMgr; @@ -79,6 +80,9 @@ public class ApiDispatcher { _createSnapshotQueueSizeLimit = snapshotLimit; } + public void setMigrateQueueSizeLimit(final Long migrateLimit) { + migrateQueueSizeLimit = migrateLimit; + } public void dispatchCreateCmd(final BaseAsyncCreateCmd cmd, final Map params) throws Exception { asyncCreationDispatchChain.dispatch(new DispatchTask(cmd, params)); @@ -123,7 +127,9 @@ public class ApiDispatcher { if (asyncCmd.getJob() != null && asyncCmd.getSyncObjId() != null && asyncCmd.getSyncObjType() != null) { Long queueSizeLimit = null; if (asyncCmd.getSyncObjType() != null && asyncCmd.getSyncObjType().equalsIgnoreCase(BaseAsyncCmd.snapshotHostSyncObject)) { - queueSizeLimit = _createSnapshotQueueSizeLimit; + queueSizeLimit = _createSnapshotQueueSizeLimit; + } else if (asyncCmd.getSyncObjType() != null && asyncCmd.getSyncObjType().equalsIgnoreCase(BaseAsyncCmd.migrationSyncObject)) { + queueSizeLimit = migrateQueueSizeLimit; } else { queueSizeLimit = 1L; } @@ -148,6 +154,6 @@ public class ApiDispatcher { } cmd.execute(); - } + } } diff --git a/server/src/main/java/com/cloud/api/ApiServer.java b/server/src/main/java/com/cloud/api/ApiServer.java index 95c8cc986fd..a8ab7b095c6 100644 --- a/server/src/main/java/com/cloud/api/ApiServer.java +++ b/server/src/main/java/com/cloud/api/ApiServer.java @@ -19,7 +19,6 @@ package com.cloud.api; import com.cloud.api.dispatch.DispatchChainFactory; import com.cloud.api.dispatch.DispatchTask; import com.cloud.api.response.ApiResponseSerializer; -import com.cloud.configuration.Config; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -35,6 +34,7 @@ import com.cloud.exception.RequestLimitException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.UnavailableCommandException; +import com.cloud.storage.VolumeApiService; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.DomainManager; @@ -44,7 +44,6 @@ import com.cloud.user.UserVO; import com.cloud.utils.ConstantTimeComparator; import com.cloud.utils.DateUtil; import com.cloud.utils.HttpUtils; -import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import 
com.cloud.utils.ReflectUtil; import com.cloud.utils.StringUtils; @@ -54,7 +53,6 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.component.PluggableService; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.EntityManager; -import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UUIDManager; import com.cloud.utils.exception.CloudRuntimeException; @@ -100,8 +98,6 @@ import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.cloudstack.framework.events.EventBus; import org.apache.cloudstack.framework.events.EventBusException; import org.apache.cloudstack.framework.jobs.AsyncJob; @@ -209,8 +205,6 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer @Inject private AsyncJobManager asyncMgr; @Inject - private ConfigurationDao configDao; - @Inject private EntityManager entityMgr; @Inject private APIAuthenticationManager authManager; @@ -228,14 +222,60 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer private static ExecutorService s_executor = new ThreadPoolExecutor(10, 150, 60, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory( "ApiServer")); - static final ConfigKey EnableSecureSessionCookie = new ConfigKey("Advanced", Boolean.class, "enable.secure.session.cookie", "false", - "Session cookie is marked as secure if this is enabled. Secure cookies only work when HTTPS is used.", false); - - static final ConfigKey JSONcontentType = new ConfigKey(String.class, "json.content.type", "Advanced", "application/json; charset=UTF-8", - "Http response content type for .js files (default is text/javascript)", false, ConfigKey.Scope.Global, null); @Inject private MessageBus messageBus; + private static final ConfigKey IntegrationAPIPort = new ConfigKey("Advanced" + , Integer.class + , "integration.api.port" + , "8096" + , "Default API port" + , false + , ConfigKey.Scope.Global); + private static final ConfigKey ConcurrentSnapshotsThresholdPerHost = new ConfigKey("Advanced" + , Long.class + , "concurrent.snapshots.threshold.perhost" + , null + , "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited" + , true // not sure if this is to be dynamic + , ConfigKey.Scope.Global); + private static final ConfigKey EncodeApiResponse = new ConfigKey("Advanced" + , Boolean.class + , "encode.api.response" + , "false" + , "Do URL encoding for the api response, false by default" + , false + , ConfigKey.Scope.Global); + static final ConfigKey JSONcontentType = new ConfigKey( "Advanced" + , String.class + , "json.content.type" + , "application/json; charset=UTF-8" + , "Http response content type for .js files (default is text/javascript)" + , false + , ConfigKey.Scope.Global); + static final ConfigKey EnableSecureSessionCookie = new ConfigKey("Advanced" + , Boolean.class + , "enable.secure.session.cookie" + , "false" + , "Session cookie is marked as secure if this is enabled. Secure cookies only work when HTTPS is used." 
+ , false + , ConfigKey.Scope.Global); + private static final ConfigKey JSONDefaultContentType = new ConfigKey ("Advanced" + , String.class + , "json.content.type" + , "application/json; charset=UTF-8" + , "Http response content type for JSON" + , false + , ConfigKey.Scope.Global); + + private static final ConfigKey UseEventAccountInfo = new ConfigKey( "advanced" + , Boolean.class + , "event.accountinfo" + , "false" + , "use account info in event logging" + , true + , ConfigKey.Scope.Global); + @Override public boolean configure(final String name, final Map params) throws ConfigurationException { messageBus.subscribe(AsyncJob.Topics.JOB_EVENT_PUBLISH, MessageDispatcher.getDispatcher(this)); @@ -305,8 +345,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer eventDescription.put("cmdInfo", job.getCmdInfo()); eventDescription.put("status", "" + job.getStatus() ); // If the event.accountinfo boolean value is set, get the human readable value for the username / domainname - Map configs = configDao.getConfiguration("management-server", new HashMap()); - if (Boolean.valueOf(configs.get("event.accountinfo"))) { + if (UseEventAccountInfo.value()) { DomainVO domain = domainDao.findById(jobOwner.getDomainId()); eventDescription.put("username", userJobOwner.getUsername()); eventDescription.put("accountname", jobOwner.getAccountName()); @@ -325,27 +364,20 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer @Override public boolean start() { Security.addProvider(new BouncyCastleProvider()); - Integer apiPort = null; // api port, null by default - final SearchCriteria sc = configDao.createSearchCriteria(); - sc.addAnd("name", SearchCriteria.Op.EQ, Config.IntegrationAPIPort.key()); - final List values = configDao.search(sc, null); - if ((values != null) && (values.size() > 0)) { - final ConfigurationVO apiPortConfig = values.get(0); - if (apiPortConfig.getValue() != null) { - apiPort = Integer.parseInt(apiPortConfig.getValue()); - apiPort = (apiPort <= 0) ? 
+ Integer apiPort = IntegrationAPIPort.value(); // api port, null by default + + final Long snapshotLimit = ConcurrentSnapshotsThresholdPerHost.value(); + if (snapshotLimit == null || snapshotLimit.longValue() <= 0) { + s_logger.debug("Global concurrent snapshot config parameter " + ConcurrentSnapshotsThresholdPerHost.key() + " is null or less than or equal to 0; defaulting to unlimited"); + } else { + dispatcher.setCreateSnapshotQueueSizeLimit(snapshotLimit); } - final Map configs = configDao.getConfiguration(); - final String strSnapshotLimit = configs.get(Config.ConcurrentSnapshotsThresholdPerHost.key()); - if (strSnapshotLimit != null) { - final Long snapshotLimit = NumbersUtil.parseLong(strSnapshotLimit, 1L); - if (snapshotLimit.longValue() <= 0) { - s_logger.debug("Global config parameter " + Config.ConcurrentSnapshotsThresholdPerHost.toString() + " is less or equal 0; defaulting to unlimited"); - } else { - dispatcher.setCreateSnapshotQueueSizeLimit(snapshotLimit); - } + final Long migrationLimit = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value(); + if (migrationLimit == null || migrationLimit.longValue() <= 0) { + s_logger.debug("Global concurrent migration config parameter " + VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.key() + " is null or less than or equal to 0; defaulting to unlimited"); + } else { + dispatcher.setMigrateQueueSizeLimit(migrationLimit); } final Set> cmdClasses = new HashSet>(); @@ -372,7 +404,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } - setEncodeApiResponse(Boolean.valueOf(configDao.getValue(Config.EncodeApiResponse.key()))); + setEncodeApiResponse(EncodeApiResponse.value()); if (apiPort != null) { final ListenerThread listenerThread = new ListenerThread(this, apiPort); @@ -1200,16 +1232,6 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } } - @Override - public String getConfigComponentName() { - return ApiServer.class.getSimpleName(); - } - - @Override - public ConfigKey[] getConfigKeys() { - return new ConfigKey[] { EnableSecureSessionCookie, JSONcontentType }; - } - // FIXME: the following two threads are copied from // http://svn.apache.org/repos/asf/httpcomponents/httpcore/trunk/httpcore/src/examples/org/apache/http/examples/ElementalHttpServer.java // we have to cite a license if we are using this code directly, so we need to add the appropriate citation or @@ -1413,4 +1435,19 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer ApiServer.encodeApiResponse = encodeApiResponse; } + @Override + public String getConfigComponentName() { + return ApiServer.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] { + IntegrationAPIPort, + ConcurrentSnapshotsThresholdPerHost, + EncodeApiResponse, + EnableSecureSessionCookie, + JSONDefaultContentType + }; + } } diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index fee4f0aee35..eda34e59704 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -566,7 +566,6 @@ public enum Config { "The interval (in milliseconds) when host stats are retrieved from agents.", null), HostRetry("Advanced", AgentManager.class, Integer.class, "host.retry", "2", "Number of times to retry hosts for creating a volume", null), - IntegrationAPIPort("Advanced", ManagementServer.class, Integer.class,
"integration.api.port", null, "Default API port. To disable set it to 0 or negative.", null), InvestigateRetryInterval( "Advanced", HighAvailabilityManager.class, @@ -1439,7 +1438,6 @@ public enum Config { "true", "Allow subdomains to use networks dedicated to their parent domain(s)", null), - EncodeApiResponse("Advanced", ManagementServer.class, Boolean.class, "encode.api.response", "false", "Do URL encoding for the api response, false by default", null), DnsBasicZoneUpdates( "Advanced", NetworkOrchestrationService.class, @@ -1693,14 +1691,6 @@ public enum Config { null), VpcMaxNetworks("Advanced", ManagementServer.class, Integer.class, "vpc.max.networks", "3", "Maximum number of networks per vpc", null), DetailBatchQuerySize("Advanced", ManagementServer.class, Integer.class, "detail.batch.query.size", "2000", "Default entity detail batch query size for listing", null), - ConcurrentSnapshotsThresholdPerHost( - "Advanced", - ManagementServer.class, - Long.class, - "concurrent.snapshots.threshold.perhost", - null, - "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited", - null), NetworkIPv6SearchRetryMax( "Network", ManagementServer.class, diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index e6695d07ac7..a95f4ef2d69 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -1342,7 +1342,8 @@ StateListener { // There should be atleast the ROOT volume of the VM in usable state if (volumesTobeCreated.isEmpty()) { - throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM"); + // OfflineVmwareMigration: find out what is wrong with the id of the vm we try to start + throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM: " + vmProfile.getId()); } // don't allow to start vm that doesn't have a root volume diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index 52c5a7218ef..445997a6d06 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -40,6 +40,7 @@ import com.cloud.resource.ResourceManager; import com.cloud.service.ServiceOfferingDetailsVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.service.dao.ServiceOfferingDetailsDao; +import com.cloud.storage.StoragePool; import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.NicProfile; @@ -225,4 +226,8 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis return null; } + @Override + public List finalizeMigrate(VirtualMachine vm, StoragePool destination) { + return null; + } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index d9e53b152f2..d54e84fd6d2 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -37,6 +37,7 @@ import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.ScopeType; import org.apache.cloudstack.acl.ControlledEntity; import 
diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index d9e53b152f2..d54e84fd6d2 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -37,6 +37,7 @@ import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.ScopeType; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -1103,6 +1104,32 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return new Pair, Integer>(result.first(), result.second()); } + private HypervisorType getHypervisorType(VMInstanceVO vm, StoragePool srcVolumePool, VirtualMachineProfile profile) { + HypervisorType type = null; + if (vm == null) { + StoragePoolVO poolVo = _poolDao.findById(srcVolumePool.getId()); + if (ScopeType.CLUSTER.equals(poolVo.getScope())) { + Long clusterId = poolVo.getClusterId(); + if (clusterId != null) { + ClusterVO cluster = _clusterDao.findById(clusterId); + type = cluster.getHypervisorType(); + } + } else if (ScopeType.ZONE.equals(poolVo.getScope())) { + // zone-wide pool: there is no cluster to consult, fall through to the pool's own hypervisor type below + } + + if (null == type) { + type = srcVolumePool.getHypervisor(); + } + } else { + type = profile.getHypervisorType(); + } + return type; + } + @Override public Pair, Integer> searchForServers(final ListHostsCmd cmd) { @@ -1433,10 +1460,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe DataCenterDeployment plan = new DataCenterDeployment(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null, null, null); VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + // OfflineVmwareMigration: vm might be null here; deal! + HypervisorType type = getHypervisorType(vm, srcVolumePool, profile); DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); //This is an override mechanism so we can list the possible local storage pools that a volume in a shared pool might be able to be migrated to - DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType()); + DiskProfile diskProfile = new DiskProfile(volume, diskOffering, type); diskProfile.setUseLocalStorage(true); for (StoragePoolAllocator allocator : _storagePoolAllocators) { diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index c9e34835b62..1f704eb8aec 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -522,7 +522,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public String getStoragePoolTags(long poolId) { - return com.cloud.utils.StringUtils.listToCsvTags(_storagePoolDao.searchForStoragePoolTags(poolId)); + return StringUtils.listToCsvTags(getStoragePoolTagList(poolId)); + } + + @Override + public List getStoragePoolTagList(long poolId) { + return _storagePoolDao.searchForStoragePoolTags(poolId); } @Override diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 21e35046143..23b56e98da3 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -56,6 +56,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.config.ConfigKey; +import
org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; @@ -178,7 +179,7 @@ import com.google.gson.Gson; import com.google.gson.GsonBuilder; import com.google.gson.JsonParseException; -public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService, VmWorkJobHandler { +public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService, VmWorkJobHandler, Configurable { private final static Logger s_logger = Logger.getLogger(VolumeApiServiceImpl.class); public static final String VM_WORK_JOB_HANDLER = VolumeApiServiceImpl.class.getSimpleName(); @@ -2028,10 +2029,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } // Check that Vm to which this volume is attached does not have VM Snapshots + // OfflineVmwareMigration: consider if this is needed and desirable if (vm != null && _vmSnapshotDao.findByVm(vm.getId()).size() > 0) { throw new InvalidParameterValueException("Volume cannot be migrated, please remove all VM snapshots for VM to which this volume is attached"); } + // OfflineVmwareMigration: extract this block as method and check if it is subject to regression if (vm != null && vm.getState() == State.Running) { // Check if the VM is GPU enabled. if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { @@ -2073,6 +2076,16 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new CloudRuntimeException("Storage pool " + destPool.getName() + " does not have enough space to migrate volume " + vol.getName()); } + // OfflineVmwareMigration: check storage tags on disk(offering)s in comparison to destination storage pool + // OfflineVmwareMigration: if no match return a proper error now + DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId()); + if (diskOffering == null) { + throw new CloudRuntimeException("Volume '" + vol.getUuid() + "' has no disk offering; the migration target cannot be checked."); + }
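The tag check that follows relies on the semantics behind doesTargetStorageSupportDiskOffering: every tag carried by the disk offering must also be present on the target pool, and an offering without tags fits any pool. A condensed sketch of that containment rule, illustrative rather than the actual helper (java.util imports assumed):

// Offering tags are a comma-separated list; the pool must carry all of them.
static boolean tagsMatch(String diskOfferingTags, List<String> storagePoolTags) {
    if (diskOfferingTags == null || diskOfferingTags.trim().isEmpty()) {
        return true; // an untagged offering can be placed on any pool
    }
    return storagePoolTags.containsAll(Arrays.asList(diskOfferingTags.split(",")));
}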
+ if (!doesTargetStorageSupportDiskOffering(destPool, diskOffering)) { + throw new CloudRuntimeException("Migration target has no matching tags for volume '" + vol.getName() + "(" + vol.getUuid() + ")'"); + } + if (liveMigrateVolume && destPool.getClusterId() != null && srcClusterId != null) { if (!srcClusterId.equals(destPool.getClusterId())) { throw new InvalidParameterValueException("Cannot migrate a volume of a virtual machine to a storage pool in a different cluster"); } @@ -2191,7 +2204,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if ((destPool.isShared() && newDiskOffering.isUseLocalStorage()) || destPool.isLocal() && newDiskOffering.isShared()) { throw new InvalidParameterValueException("You cannot move the volume to a shared storage and assign a disk offering for local storage and vice versa."); } - if (!doesTargetStorageSupportNewDiskOffering(destPool, newDiskOffering)) { + if (!doesTargetStorageSupportDiskOffering(destPool, newDiskOffering)) { throw new InvalidParameterValueException(String.format("Target Storage [id=%s] tags [%s] does not match new disk offering [id=%s] tags [%s].", destPool.getUuid(), getStoragePoolTags(destPool), newDiskOffering.getUuid(), newDiskOffering.getTags())); } @@ -2236,9 +2249,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic * * */ - protected boolean doesTargetStorageSupportNewDiskOffering(StoragePool destPool, DiskOfferingVO newDiskOffering) { - String newDiskOfferingTags = newDiskOffering.getTags(); - return doesTargetStorageSupportDiskOffering(destPool, newDiskOfferingTags); + protected boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, DiskOfferingVO diskOffering) { + String diskOfferingTags = diskOffering.getTags(); + return doesTargetStorageSupportDiskOffering(destPool, diskOfferingTags); } @Override @@ -3350,4 +3363,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return workJob; } + @Override + public String getConfigComponentName() { + return VolumeApiService.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {ConcurrentMigrationsThresholdPerDatastore}; + } } \ No newline at end of file diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index ebde735ef7c..68b45e1af7c 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -5065,12 +5065,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getType() != VirtualMachine.Type.User) { + // OffLineVmwareMigration: *WHY* ? throw new InvalidParameterValueException("can only do storage migration on user vm"); } List vols = _volsDao.findByInstance(vm.getId()); if (vols.size() > 1) { - throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first"); + // OffLineVmwareMigration: data disks are not permitted, here! + if (vols.size() > 1 && + // OffLineVmwareMigration: allow multiple disks for vmware + !HypervisorType.VMware.equals(vm.getHypervisorType())) { + throw new InvalidParameterValueException("Data disks attached to the vm, cannot migrate. Need to detach data disks first"); + } }
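Because the inner vols.size() > 1 test merely repeats the outer one, the whole guard collapses to the equivalent form below; this is a readability sketch of the same logic, not a further change shipped by this patch:

// Only VMware may migrate a stopped VM together with its data disks.
if (vols.size() > 1 && !HypervisorType.VMware.equals(vm.getHypervisorType())) {
    throw new InvalidParameterValueException("Data disks attached to the vm, cannot migrate. Need to detach data disks first");
}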
Need to detach data disks first"); + } } // Check that Vm does not have VM Snapshots @@ -5078,6 +5084,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM"); } + checkDestinationHypervisorType(destPool, vm); + + _itMgr.storageMigration(vm.getUuid(), destPool); + return _vmDao.findById(vm.getId()); + + } + + private void checkDestinationHypervisorType(StoragePool destPool, VMInstanceVO vm) { HypervisorType destHypervisorType = destPool.getHypervisor(); if (destHypervisorType == null) { destHypervisorType = _clusterDao.findById( @@ -5087,8 +5101,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vm.getHypervisorType() != destHypervisorType && destHypervisorType != HypervisorType.Any) { throw new InvalidParameterValueException("hypervisor is not compatible: dest: " + destHypervisorType.toString() + ", vm: " + vm.getHypervisorType().toString()); } - _itMgr.storageMigration(vm.getUuid(), destPool); - return _vmDao.findById(vm.getId()); } @@ -5144,12 +5156,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); } - if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM) - && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv) - && !vm.getHypervisorType().equals(HypervisorType.LXC) && !vm.getHypervisorType().equals(HypervisorType.Simulator) - && !vm.getHypervisorType().equals(HypervisorType.Ovm3)) { + if (!isOnSupportedHypevisorForMigration(vm)) { if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM."); + s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM form hypervisor type " + vm.getHypervisorType()); } throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only"); } @@ -5227,6 +5236,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } + private boolean isOnSupportedHypevisorForMigration(VMInstanceVO vm) { + return (vm.getHypervisorType().equals(HypervisorType.XenServer) || + vm.getHypervisorType().equals(HypervisorType.VMware) || + vm.getHypervisorType().equals(HypervisorType.KVM) || + vm.getHypervisorType().equals(HypervisorType.Ovm) || + vm.getHypervisorType().equals(HypervisorType.Hyperv) || + vm.getHypervisorType().equals(HypervisorType.LXC) || + vm.getHypervisorType().equals(HypervisorType.Simulator) || + vm.getHypervisorType().equals(HypervisorType.Ovm3)); + } + private boolean checkIfHostIsDedicated(HostVO host) { long hostId = host.getId(); DedicatedResourceVO dedicatedHost = _dedicatedDao.findByHostId(hostId); @@ -5469,7 +5489,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to find the vm by id " + vmId); } + // OfflineVmwareMigration: this would be it ;) if multiple paths exist: unify if (vm.getState() != State.Running) { + // OfflineVmwareMigration: and not vmware if (s_logger.isDebugEnabled()) { s_logger.debug("VM is not Running, unable to migrate the vm " + vm); } @@ -5482,6 +5504,7 @@ public class 
@@ -5482,6 +5504,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); } + // OfflineVmwareMigration: this condition is too complicated. (already a method somewhere) if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM) && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv) && !vm.getHypervisorType().equals(HypervisorType.Simulator)) { diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index af727e42b92..693b437079b 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -1004,7 +1004,7 @@ public class VolumeApiServiceImplTest { StoragePool storagePoolMock = Mockito.mock(StoragePool.class); Mockito.doReturn("A").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock); - boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock); + boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock); Assert.assertFalse(result); } @@ -1017,7 +1017,7 @@ public class VolumeApiServiceImplTest { StoragePool storagePoolMock = Mockito.mock(StoragePool.class); Mockito.doReturn("A,B,C,D,X,Y").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock); - boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock); + boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock); Assert.assertTrue(result); } @@ -1030,7 +1030,7 @@ public class VolumeApiServiceImplTest { StoragePool storagePoolMock = Mockito.mock(StoragePool.class); Mockito.doReturn("A,B,C,D,X,Y").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock); - boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock); + boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock); Assert.assertTrue(result); } @@ -1043,7 +1043,7 @@ public class VolumeApiServiceImplTest { StoragePool storagePoolMock = Mockito.mock(StoragePool.class); Mockito.doReturn("").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock); - boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock); + boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock); Assert.assertFalse(result); } @@ -1056,7 +1056,7 @@ public class VolumeApiServiceImplTest { StoragePool storagePoolMock = Mockito.mock(StoragePool.class); Mockito.doReturn("").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock); - boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock); + boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock); Assert.assertTrue(result); } @@ -1069,7 +1069,7 @@ public class VolumeApiServiceImplTest { StoragePool storagePoolMock = Mockito.mock(StoragePool.class); Mockito.doReturn("C,D").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock); - boolean
result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock); + boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock); Assert.assertFalse(result); } @@ -1082,7 +1082,7 @@ public class VolumeApiServiceImplTest { StoragePool storagePoolMock = Mockito.mock(StoragePool.class); Mockito.doReturn("A").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock); - boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock); + boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock); Assert.assertTrue(result); } diff --git a/test/integration/smoke/test_primary_storage.py b/test/integration/smoke/test_primary_storage.py index 707d0b95fcd..d397c773b12 100644 --- a/test/integration/smoke/test_primary_storage.py +++ b/test/integration/smoke/test_primary_storage.py @@ -16,21 +16,19 @@ # under the License. """ BVT tests for Primary Storage """ -#Import Local Modules -import marvin + +# Import System modules +# Import Local Modules from marvin.cloudstackTestCase import * -from marvin.cloudstackAPI import * -from marvin.lib.utils import * from marvin.lib.base import * from marvin.lib.common import * -from nose.plugins.attrib import attr -import logging from marvin.lib.decoratorGenerators import skipTestIf +from marvin.lib.utils import * +from nose.plugins.attrib import attr -#Import System modules -import time _multiprocess_shared_ = True + class TestPrimaryStorageServices(cloudstackTestCase): def setUp(self): @@ -49,14 +47,14 @@ class TestPrimaryStorageServices(cloudstackTestCase): def tearDown(self): try: - #Clean up, terminate the created templates + # Clean up, terminate the created templates cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_01_primary_storage_nfs(self): """Test primary storage pools - XEN, KVM, VMWare. Not Supported for hyperv """ @@ -64,39 +62,36 @@ class TestPrimaryStorageServices(cloudstackTestCase): if self.hypervisor.lower() in ["hyperv"]: raise self.skipTest("NFS primary storage not supported for Hyper-V") - # Validate the following: # 1. List Clusters # 2. verify that the cluster is in 'Enabled' allocation state # 3. 
verify that the host is added successfully and # in Up state with listHosts api response - #Create NFS storage pools with on XEN/KVM/VMWare clusters - + # Create NFS storage pools on XEN/KVM/VMWare clusters clusters = list_clusters( self.apiclient, zoneid=self.zone.id ) - assert isinstance(clusters,list) and len(clusters)>0 + assert isinstance(clusters, list) and len(clusters) > 0 for cluster in clusters: - - #Host should be present before adding primary storage + # Host should be present before adding primary storage list_hosts_response = list_hosts( - self.apiclient, - clusterid=cluster.id - ) + self.apiclient, + clusterid=cluster.id + ) self.assertEqual( - isinstance(list_hosts_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_hosts_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_hosts_response), - 0, - "Check list Hosts in the cluster: " + cluster.name - ) + len(list_hosts_response), + 0, + "Check list Hosts in the cluster: " + cluster.name + ) storage = StoragePool.create(self.apiclient, self.services["nfs"], @@ -112,53 +107,52 @@ class TestPrimaryStorageServices(cloudstackTestCase): storage.state, 'Up', "Check primary storage state " - ) + ) self.assertEqual( storage.type, 'NetworkFilesystem', "Check storage pool type " - ) + ) - #Verify List Storage pool Response has newly added storage pool + # Verify List Storage pool Response has newly added storage pool storage_pools_response = list_storage_pools( - self.apiclient, - id=storage.id, - ) + self.apiclient, + id=storage.id, + ) self.assertEqual( - isinstance(storage_pools_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(storage_pools_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(storage_pools_response), - 0, - "Check list Hosts response" - ) + len(storage_pools_response), + 0, + "Check list Hosts response" + ) storage_response = storage_pools_response[0] self.assertEqual( - storage_response.id, - storage.id, - "Check storage pool ID" - ) + storage_response.id, + storage.id, + "Check storage pool ID" + ) self.assertEqual( - storage.type, - storage_response.type, - "Check storage pool type " - ) + storage.type, + storage_response.type, + "Check storage pool type " + ) # Call cleanup for reusing primary storage cleanup_resources(self.apiclient, self.cleanup) self.cleanup = [] return - - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true") + @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true") def test_01_primary_storage_iscsi(self): """Test primary storage pools - XEN. 
Not Supported for kvm,hyperv,vmware """ - if self.hypervisor.lower() in ["kvm","hyperv", "vmware", "lxc"]: + if self.hypervisor.lower() in ["kvm", "hyperv", "vmware", "lxc"]: raise self.skipTest("iscsi primary storage not supported on kvm, VMWare, Hyper-V, or LXC") if not self.services["configurableData"]["iscsi"]["url"]: @@ -175,26 +169,24 @@ class TestPrimaryStorageServices(cloudstackTestCase): self.apiclient, zoneid=self.zone.id ) - assert isinstance(clusters,list) and len(clusters)>0 + assert isinstance(clusters, list) and len(clusters) > 0 for cluster in clusters: - - #Host should be present before adding primary storage + # Host should be present before adding primary storage list_hosts_response = list_hosts( - self.apiclient, - clusterid=cluster.id - ) + self.apiclient, + clusterid=cluster.id + ) self.assertEqual( - isinstance(list_hosts_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_hosts_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_hosts_response), - 0, - "Check list Hosts in the cluster: " + cluster.name - ) - + len(list_hosts_response), + 0, + "Check list Hosts in the cluster: " + cluster.name + ) storage = StoragePool.create(self.apiclient, self.services["configurableData"]["iscsi"], @@ -210,58 +202,58 @@ class TestPrimaryStorageServices(cloudstackTestCase): storage.state, 'Up', "Check primary storage state " - ) + ) self.assertEqual( storage.type, 'IscsiLUN', "Check storage pool type " - ) + ) - #Verify List Storage pool Response has newly added storage pool + # Verify List Storage pool Response has newly added storage pool storage_pools_response = list_storage_pools( - self.apiclient, - id=storage.id, - ) + self.apiclient, + id=storage.id, + ) self.assertEqual( - isinstance(storage_pools_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(storage_pools_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(storage_pools_response), - 0, - "Check list Hosts response" - ) + len(storage_pools_response), + 0, + "Check list Hosts response" + ) storage_response = storage_pools_response[0] self.assertEqual( - storage_response.id, - storage.id, - "Check storage pool ID" - ) + storage_response.id, + storage.id, + "Check storage pool ID" + ) self.assertEqual( - storage.type, - storage_response.type, - "Check storage pool type " - ) + storage.type, + storage_response.type, + "Check storage pool type " + ) # Call cleanup for reusing primary storage cleanup_resources(self.apiclient, self.cleanup) self.cleanup = [] return - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_01_add_primary_storage_disabled_host(self): """Test add primary storage pool with disabled host """ - #Disable a host + # Disable a host clusters = list_clusters( self.apiclient, zoneid=self.zone.id ) - assert isinstance(clusters,list) and len(clusters)>0 + assert isinstance(clusters, list) and len(clusters) > 0 for cluster in clusters: list_hosts_response = list_hosts( @@ -269,15 +261,14 @@ class TestPrimaryStorageServices(cloudstackTestCase): clusterid=cluster.id, type="Routing" ) - assert isinstance(list_hosts_response,list) + assert isinstance(list_hosts_response, list) if len(list_hosts_response) < 2: continue selected_cluster = cluster selected_host = list_hosts_response[0] 
Host.update(self.apiclient, id=selected_host.id, allocationstate="Disable") - - #create a pool + # create a pool storage_pool_2 = StoragePool.create( self.apiclient, self.services["nfs2"], @@ -285,24 +276,23 @@ class TestPrimaryStorageServices(cloudstackTestCase): zoneid=self.zone.id, podid=self.pod.id ) - #self.cleanup.append(storage_pool_2) + # self.cleanup.append(storage_pool_2) - #Enable host and disable others + # Enable host and disable others Host.update(self.apiclient, id=selected_host.id, allocationstate="Enable") - for host in list_hosts_response : - if(host.id == selected_host.id) : + for host in list_hosts_response: + if (host.id == selected_host.id): continue Host.update(self.apiclient, id=host.id, allocationstate="Disable") - - #put other pools in maintenance - storage_pool_list = StoragePool.list(self.apiclient, zoneid = self.zone.id) - for pool in storage_pool_list : - if(pool.id == storage_pool_2.id) : + # put other pools in maintenance + storage_pool_list = StoragePool.list(self.apiclient, zoneid=self.zone.id) + for pool in storage_pool_list: + if (pool.id == storage_pool_2.id): continue - StoragePool.update(self.apiclient,id=pool.id, enabled=False) + StoragePool.update(self.apiclient, id=pool.id, enabled=False) - #deployvm + # deployvm try: # Create Account account = Account.create( @@ -329,20 +319,20 @@ class TestPrimaryStorageServices(cloudstackTestCase): self.cleanup.append(self.virtual_machine) self.cleanup.append(account) finally: - #cancel maintenance - for pool in storage_pool_list : - if(pool.id == storage_pool_2.id) : + # cancel maintenance + for pool in storage_pool_list: + if (pool.id == storage_pool_2.id): continue - StoragePool.update(self.apiclient,id=pool.id, enabled=True) - #Enable all hosts - for host in list_hosts_response : - if(host.id == selected_host.id) : + StoragePool.update(self.apiclient, id=pool.id, enabled=True) + # Enable all hosts + for host in list_hosts_response: + if (host.id == selected_host.id): continue Host.update(self.apiclient, id=host.id, allocationstate="Enable") cleanup_resources(self.apiclient, self.cleanup) self.cleanup = [] - StoragePool.enableMaintenance(self.apiclient,storage_pool_2.id) + StoragePool.enableMaintenance(self.apiclient, storage_pool_2.id) time.sleep(30); cmd = deleteStoragePool.deleteStoragePoolCmd() cmd.id = storage_pool_2.id @@ -355,21 +345,23 @@ class TestPrimaryStorageServices(cloudstackTestCase): class StorageTagsServices: """Test Storage Tags Data Class. 
""" + def __init__(self): self.storage_tags = { - "a" : "NFS-A", - "b" : "NFS-B" + "a": "NFS-A", + "b": "NFS-B" } - + + class TestStorageTags(cloudstackTestCase): - + @classmethod def setUpClass(cls): cls.logger = logging.getLogger('TestStorageTags') cls.stream_handler = logging.StreamHandler() cls.logger.setLevel(logging.DEBUG) cls.logger.addHandler(cls.stream_handler) - + test_case = super(TestStorageTags, cls) testClient = test_case.getClsTestClient() cls.config = test_case.getClsConfig() @@ -383,36 +375,35 @@ class TestStorageTags(cloudstackTestCase): cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id cls.services["storage_tags"] = StorageTagsServices().storage_tags - + cls.hypervisorNotSupported = False if cls.hypervisor.lower() in ["hyperv"]: cls.hypervisorNotSupported = True cls._cleanup = [] - + if not cls.hypervisorNotSupported: - cls.clusters = list_clusters( cls.apiclient, zoneid=cls.zone.id ) assert isinstance(cls.clusters, list) and len(cls.clusters) > 0 - + # Create PS with Storage Tag cls.storage_pool_1 = StoragePool.create(cls.apiclient, - cls.services["nfs"], - clusterid=cls.clusters[0].id, - zoneid=cls.zone.id, - podid=cls.pod.id, - tags=cls.services["storage_tags"]["a"] - ) - #PS not appended to _cleanup, it is removed on tearDownClass before cleaning up resources + cls.services["nfs"], + clusterid=cls.clusters[0].id, + zoneid=cls.zone.id, + podid=cls.pod.id, + tags=cls.services["storage_tags"]["a"] + ) + # PS not appended to _cleanup, it is removed on tearDownClass before cleaning up resources assert cls.storage_pool_1.state == 'Up' storage_pools_response = list_storage_pools(cls.apiclient, id=cls.storage_pool_1.id) assert isinstance(storage_pools_response, list) and len(storage_pools_response) > 0 storage_response = storage_pools_response[0] assert storage_response.id == cls.storage_pool_1.id and storage_response.type == cls.storage_pool_1.type - + # Create Service Offerings with different Storage Tags cls.service_offering_1 = ServiceOffering.create( cls.apiclient, @@ -426,7 +417,7 @@ class TestStorageTags(cloudstackTestCase): tags=cls.services["storage_tags"]["b"] ) cls._cleanup.append(cls.service_offering_2) - + # Create Disk Offerings with different Storage Tags cls.disk_offering_1 = DiskOffering.create( cls.apiclient, @@ -440,7 +431,7 @@ class TestStorageTags(cloudstackTestCase): tags=cls.services["storage_tags"]["b"] ) cls._cleanup.append(cls.disk_offering_2) - + # Create Account cls.account = Account.create( cls.apiclient, @@ -448,7 +439,7 @@ class TestStorageTags(cloudstackTestCase): domainid=cls.domain.id ) cls._cleanup.append(cls.account) - + # Create VM-1 with using Service Offering 1 cls.virtual_machine_1 = VirtualMachine.create( cls.apiclient, @@ -461,9 +452,9 @@ class TestStorageTags(cloudstackTestCase): mode=cls.zone.networktype ) # VM-1 not appended to _cleanup, it is expunged on tearDownClass before cleaning up resources - + return - + @classmethod def tearDownClass(cls): try: @@ -484,7 +475,7 @@ class TestStorageTags(cloudstackTestCase): cleanup_resources(cls.apiclient, cls._cleanup) except Exception as e: raise Exception("Cleanup failed with %s" % e) - + def setUp(self): self.dbclient = self.testClient.getDbConnection() self.cleanup = [] @@ -495,16 +486,16 @@ class TestStorageTags(cloudstackTestCase): cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) - + @attr(tags=["advanced", "advancedns", "smoke", 
"basic", "sg"], required_hardware="false") @skipTestIf("hypervisorNotSupported") def test_01_deploy_vms_storage_tags(self): """Test Deploy VMS using different Service Offerings with Storage Tags """ - + # Save cleanup size before trying to deploy VM-2 cleanup_size = len(self.cleanup) - + # Try deploying VM-2 using CO-2 -> Should fail to find storage and fail deployment try: self.virtual_machine_2 = VirtualMachine.create( @@ -519,32 +510,32 @@ class TestStorageTags(cloudstackTestCase): self.cleanup.append(self.virtual_machine_2) except Exception as e: self.debug("Expected exception %s: " % e) - + self.debug("Asssert that vm2 was not deployed, so it couldn't be appended to cleanup") self.assertEquals(cleanup_size, len(self.cleanup)) - + # Create V-1 using DO-1 self.volume_1 = Volume.create( - self.apiclient, - self.services, - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid, - diskofferingid=self.disk_offering_1.id + self.apiclient, + self.services, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering_1.id ) self.cleanup.append(self.volume_1) - + # Create V-2 using DO-2 self.volume_2 = Volume.create( - self.apiclient, - self.services, - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid, - diskofferingid=self.disk_offering_2.id + self.apiclient, + self.services, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering_2.id ) self.cleanup.append(self.volume_2) - + # Try attaching V-2 to VM-1 -> Should fail finding storage and fail attachment try: self.virtual_machine_1.attach_volume( @@ -553,7 +544,7 @@ class TestStorageTags(cloudstackTestCase): ) except Exception as e: self.debug("Expected exception %s: " % e) - + vm_1_volumes = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine_1.id, @@ -562,9 +553,9 @@ class TestStorageTags(cloudstackTestCase): ) self.debug("VM-1 Volumes: %s" % vm_1_volumes) self.assertEquals(None, vm_1_volumes, "Check that volume V-2 has not been attached to VM-1") - + # Attach V_1 to VM_1 - self.virtual_machine_1.attach_volume(self.apiclient,self.volume_1) + self.virtual_machine_1.attach_volume(self.apiclient, self.volume_1) vm_1_volumes = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine_1.id, @@ -574,22 +565,22 @@ class TestStorageTags(cloudstackTestCase): self.debug("VM-1 Volumes: %s" % vm_1_volumes) self.assertEquals(vm_1_volumes[0].id, self.volume_1.id, "Check that volume V-1 has been attached to VM-1") self.virtual_machine_1.detach_volume(self.apiclient, self.volume_1) - + return - + def check_storage_pool_tag(self, poolid, tag): cmd = listStorageTags.listStorageTagsCmd() storage_tags_response = self.apiclient.listStorageTags(cmd) pool_tags = filter(lambda x: x.poolid == poolid, storage_tags_response) self.assertEquals(1, len(pool_tags), "Check storage tags size") self.assertEquals(tag, pool_tags[0].name, "Check storage tag on storage pool") - + @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") @skipTestIf("hypervisorNotSupported") def test_02_edit_primary_storage_tags(self): """ Test Edit Storage Tags """ - + qresultset = self.dbclient.execute( "select id from storage_pool where uuid = '%s';" % str(self.storage_pool_1.id) @@ -597,43 +588,43 @@ class TestStorageTags(cloudstackTestCase): self.assertEquals(1, len(qresultset), "Check DB Query result set") qresult = qresultset[0] storage_pool_db_id = 
qresult[0] - + self.check_storage_pool_tag(storage_pool_db_id, self.services["storage_tags"]["a"]) - + # Update Storage Tag StoragePool.update( self.apiclient, id=self.storage_pool_1.id, tags=self.services["storage_tags"]["b"] ) - + self.check_storage_pool_tag(storage_pool_db_id, self.services["storage_tags"]["b"]) - + # Revert Storage Tag StoragePool.update( self.apiclient, id=self.storage_pool_1.id, tags=self.services["storage_tags"]["a"] ) - + self.check_storage_pool_tag(storage_pool_db_id, self.services["storage_tags"]["a"]) - + return - + @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") @skipTestIf("hypervisorNotSupported") def test_03_migration_options_storage_tags(self): """ Test Volume migration options for Storage Pools with different Storage Tags """ - + # Create PS-2 using Storage Tag storage_pool_2 = StoragePool.create(self.apiclient, - self.services["nfs2"], - clusterid=self.clusters[0].id, - zoneid=self.zone.id, - podid=self.pod.id, - tags=self.services["storage_tags"]["a"] - ) + self.services["nfs2"], + clusterid=self.clusters[0].id, + zoneid=self.zone.id, + podid=self.pod.id, + tags=self.services["storage_tags"]["a"] + ) self.cleanup.append(storage_pool_2) assert storage_pool_2.state == 'Up' storage_pools_response = list_storage_pools(self.apiclient, @@ -641,7 +632,7 @@ class TestStorageTags(cloudstackTestCase): assert isinstance(storage_pools_response, list) and len(storage_pools_response) > 0 storage_response = storage_pools_response[0] assert storage_response.id == storage_pool_2.id and storage_response.type == storage_pool_2.type - + vm_1_volumes = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine_1.id, @@ -667,27 +658,27 @@ class TestStorageTags(cloudstackTestCase): self.apiclient, id=vol.id ) - pools_suitable = filter(lambda p : p.suitableformigration, pools_response) - + pools_suitable = filter(lambda p: p.suitableformigration, pools_response) + self.debug("Suitable storage pools found: %s" % len(pools_suitable)) self.assertEquals(1, len(pools_suitable), "Check that there is only one item on the list") self.assertEquals(pools_suitable[0].id, storage_pool_2.id, "Check that PS-2 is the migration option for volume") - + # Update PS-2 Storage Tags StoragePool.update( self.apiclient, id=storage_pool_2.id, tags=self.services["storage_tags"]["b"] ) - + # Check migration options for volume after updating PS-2 Storage Tags pools_response = StoragePool.listForMigration( self.apiclient, id=vol.id ) - pools_suitable = filter(lambda p : p.suitableformigration, pools_response) - + pools_suitable = filter(lambda p: p.suitableformigration, pools_response) + self.debug("Suitable storage pools found: %s" % len(pools_suitable)) self.assertEquals(0, len(pools_suitable), "Check that there is no migration option for volume") - - return \ No newline at end of file + + return diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index 3ba43ac6245..32e917444c0 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -16,16 +16,18 @@ # under the License. 
""" BVT tests for Virtual Machine Life Cycle """ -#Import Local Modules +# Import Local Modules from marvin.cloudstackTestCase import cloudstackTestCase from marvin.cloudstackAPI import (recoverVirtualMachine, destroyVirtualMachine, attachIso, detachIso, provisionCertificate, - updateConfiguration) -from marvin.lib.utils import * - + updateConfiguration, + migrateVirtualMachine) +from marvin.lib.utils import (cleanup_resources, + validateList, + SshClient) from marvin.lib.base import (Account, ServiceOffering, VirtualMachine, @@ -33,19 +35,21 @@ from marvin.lib.base import (Account, Iso, Router, Configurations, + StoragePool, Volume, DiskOffering) from marvin.lib.common import (get_domain, - get_zone, - get_template, + get_zone, + get_template, list_hosts) from marvin.codes import FAILED, PASS from nose.plugins.attrib import attr -#Import System modules +# Import System modules import time -import re _multiprocess_shared_ = True + + class TestDeployVM(cloudstackTestCase): @classmethod @@ -59,8 +63,8 @@ class TestDeployVM(cloudstackTestCase): cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype - #If local storage is enabled, alter the offerings to use localstorage - #this step is needed for devcloud + # If local storage is enabled, alter the offerings to use localstorage + # this step is needed for devcloud if cls.zone.localstorageenabled == True: cls.services["service_offerings"]["tiny"]["storagetype"] = 'local' cls.services["service_offerings"]["small"]["storagetype"] = 'local' @@ -118,8 +122,7 @@ class TestDeployVM(cloudstackTestCase): self.dbclient = self.testClient.getDbConnection() self.cleanup = [] - - @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_deploy_vm(self): """Test Deploy Virtual Machine """ @@ -127,47 +130,46 @@ class TestDeployVM(cloudstackTestCase): # 1. Virtual Machine is accessible via SSH # 2. 
listVirtualMachines returns accurate information list_vm_response = VirtualMachine.list( - self.apiclient, - id=self.virtual_machine.id - ) + self.apiclient, + id=self.virtual_machine.id + ) self.debug( - "Verify listVirtualMachines response for virtual machine: %s" \ - % self.virtual_machine.id - ) + "Verify listVirtualMachines response for virtual machine: %s" \ + % self.virtual_machine.id + ) self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_vm_response), - 0, - "Check VM available in List Virtual Machines" - ) + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) vm_response = list_vm_response[0] self.assertEqual( - vm_response.id, - self.virtual_machine.id, - "Check virtual machine id in listVirtualMachines" - ) + vm_response.id, + self.virtual_machine.id, + "Check virtual machine id in listVirtualMachines" + ) self.assertEqual( - vm_response.name, - self.virtual_machine.name, - "Check virtual machine name in listVirtualMachines" - ) + vm_response.name, + self.virtual_machine.name, + "Check virtual machine name in listVirtualMachines" + ) self.assertEqual( vm_response.state, 'Running', - msg="VM is not in Running state" + msg="VM is not in Running state" ) return - - @attr(tags = ["advanced"], required_hardware="false") + @attr(tags=["advanced"], required_hardware="false") def test_advZoneVirtualRouter(self): - #TODO: SIMENH: duplicate test, remove it + # TODO: SIMENH: duplicate test, remove it """ Test advanced zone virtual router 1. Is Running @@ -176,21 +178,20 @@ class TestDeployVM(cloudstackTestCase): @return: """ routers = Router.list(self.apiclient, account=self.account.name) - self.assertTrue(len(routers) > 0, msg = "No virtual router found") + self.assertTrue(len(routers) > 0, msg="No virtual router found") router = routers[0] self.assertEqual(router.state, 'Running', msg="Router is not in running state") self.assertEqual(router.account, self.account.name, msg="Router does not belong to the account") - #Has linklocal, public and guest ips + # Has linklocal, public and guest ips self.assertIsNotNone(router.linklocalip, msg="Router has no linklocal ip") self.assertIsNotNone(router.publicip, msg="Router has no public ip") self.assertIsNotNone(router.guestipaddress, msg="Router has no guest ip") - - @attr(mode = ["basic"], required_hardware="false") + @attr(mode=["basic"], required_hardware="false") def test_basicZoneVirtualRouter(self): - #TODO: SIMENH: duplicate test, remove it + # TODO: SIMENH: duplicate test, remove it """ Tests for basic zone virtual router 1. 
Is Running @@ -198,13 +199,13 @@ class TestDeployVM(cloudstackTestCase): @return: """ routers = Router.list(self.apiclient, account=self.account.name) - self.assertTrue(len(routers) > 0, msg = "No virtual router found") + self.assertTrue(len(routers) > 0, msg="No virtual router found") router = routers[0] self.assertEqual(router.state, 'Running', msg="Router is not in running state") self.assertEqual(router.account, self.account.name, msg="Router does not belong to the account") - @attr(tags = ['advanced','basic','sg'], required_hardware="false") + @attr(tags=['advanced', 'basic', 'sg'], required_hardware="false") def test_deploy_vm_multiple(self): """Test Multiple Deploy Virtual Machine @@ -236,7 +237,8 @@ class TestDeployVM(cloudstackTestCase): list_vms = VirtualMachine.list(self.apiclient, ids=[virtual_machine1.id, virtual_machine2.id], listAll=True) self.debug( - "Verify listVirtualMachines response for virtual machines: %s, %s" % (virtual_machine1.id, virtual_machine2.id) + "Verify listVirtualMachines response for virtual machines: %s, %s" % ( + virtual_machine1.id, virtual_machine2.id) ) self.assertEqual( isinstance(list_vms, list), @@ -271,18 +273,18 @@ class TestVMLifeCycle(cloudstackTestCase): cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype - #if local storage is enabled, alter the offerings to use localstorage - #this step is needed for devcloud + # if local storage is enabled, alter the offerings to use localstorage + # this step is needed for devcloud if cls.zone.localstorageenabled == True: cls.services["service_offerings"]["tiny"]["storagetype"] = 'local' cls.services["service_offerings"]["small"]["storagetype"] = 'local' cls.services["service_offerings"]["medium"]["storagetype"] = 'local' template = get_template( - cls.apiclient, - cls.zone.id, - cls.services["ostype"] - ) + cls.apiclient, + cls.zone.id, + cls.services["ostype"] + ) if template == FAILED: assert False, "get_template() failed to return template with description %s" % cls.services["ostype"] @@ -294,50 +296,50 @@ class TestVMLifeCycle(cloudstackTestCase): # Create VMs, NAT Rules etc cls.account = Account.create( - cls.apiclient, - cls.services["account"], - domainid=domain.id - ) + cls.apiclient, + cls.services["account"], + domainid=domain.id + ) cls.small_offering = ServiceOffering.create( - cls.apiclient, - cls.services["service_offerings"]["small"] - ) + cls.apiclient, + cls.services["service_offerings"]["small"] + ) cls.medium_offering = ServiceOffering.create( - cls.apiclient, - cls.services["service_offerings"]["medium"] - ) - #create small and large virtual machines + cls.apiclient, + cls.services["service_offerings"]["medium"] + ) + # create small and large virtual machines cls.small_virtual_machine = VirtualMachine.create( - cls.apiclient, - cls.services["small"], - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.small_offering.id, - mode=cls.services["mode"] - ) + cls.apiclient, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.small_offering.id, + mode=cls.services["mode"] + ) cls.medium_virtual_machine = VirtualMachine.create( - cls.apiclient, - cls.services["small"], - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.medium_offering.id, - mode=cls.services["mode"] - ) + cls.apiclient, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + 
serviceofferingid=cls.medium_offering.id, + mode=cls.services["mode"] + ) cls.virtual_machine = VirtualMachine.create( - cls.apiclient, - cls.services["small"], - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.small_offering.id, - mode=cls.services["mode"] - ) + cls.apiclient, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.small_offering.id, + mode=cls.services["mode"] + ) cls._cleanup = [ - cls.small_offering, - cls.medium_offering, - cls.account - ] + cls.small_offering, + cls.medium_offering, + cls.account + ] @classmethod def tearDownClass(cls): @@ -355,14 +357,13 @@ class TestVMLifeCycle(cloudstackTestCase): def tearDown(self): try: - #Clean up, terminate the created ISOs + # Clean up, terminate the created ISOs cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return - - @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_01_stop_vm(self): """Test Stop Virtual Machine """ @@ -377,8 +378,7 @@ class TestVMLifeCycle(cloudstackTestCase): self.fail("Failed to stop VM: %s" % e) return - - @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_01_stop_vm_forced(self): """Test Force Stop Virtual Machine """ @@ -388,30 +388,29 @@ class TestVMLifeCycle(cloudstackTestCase): self.fail("Failed to stop VM: %s" % e) list_vm_response = VirtualMachine.list( - self.apiclient, - id=self.small_virtual_machine.id - ) + self.apiclient, + id=self.small_virtual_machine.id + ) self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_vm_response), - 0, - "Check VM avaliable in List Virtual Machines" - ) + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) self.assertEqual( - list_vm_response[0].state, - "Stopped", - "Check virtual machine is in stopped state" - ) + list_vm_response[0].state, + "Stopped", + "Check virtual machine is in stopped state" + ) return - - @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_02_start_vm(self): """Test Start Virtual Machine """ @@ -423,33 +422,33 @@ class TestVMLifeCycle(cloudstackTestCase): self.small_virtual_machine.start(self.apiclient) list_vm_response = VirtualMachine.list( - self.apiclient, - id=self.small_virtual_machine.id - ) + self.apiclient, + id=self.small_virtual_machine.id + ) self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_vm_response), - 0, - "Check VM avaliable in List Virtual Machines" - ) + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) self.debug( 
"Verify listVirtualMachines response for virtual machine: %s" \ + % self.small_virtual_machine.id + ) self.assertEqual( - list_vm_response[0].state, - "Running", - "Check virtual machine is in running state" - ) + list_vm_response[0].state, + "Running", + "Check virtual machine is in running state" + ) return - @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_03_reboot_vm(self): """Test Reboot Virtual Machine """ @@ -463,30 +462,29 @@ class TestVMLifeCycle(cloudstackTestCase): self.small_virtual_machine.reboot(self.apiclient) list_vm_response = VirtualMachine.list( - self.apiclient, - id=self.small_virtual_machine.id - ) + self.apiclient, + id=self.small_virtual_machine.id + ) self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_vm_response), - 0, - "Check VM avaliable in List Virtual Machines" - ) + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) self.assertEqual( - list_vm_response[0].state, - "Running", - "Check virtual machine is in running state" - ) + list_vm_response[0].state, + "Running", + "Check virtual machine is in running state" + ) return - - @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_06_destroy_vm(self): """Test destroy Virtual Machine """ @@ -500,31 +498,31 @@ class TestVMLifeCycle(cloudstackTestCase): self.small_virtual_machine.delete(self.apiclient, expunge=False) list_vm_response = VirtualMachine.list( - self.apiclient, - id=self.small_virtual_machine.id - ) + self.apiclient, + id=self.small_virtual_machine.id + ) self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_vm_response), - 0, - "Check VM avaliable in List Virtual Machines" - ) + len(list_vm_response), + 0, + "Check VM avaliable in List Virtual Machines" + ) self.assertEqual( - list_vm_response[0].state, - "Destroyed", - "Check virtual machine is in destroyed state" - ) + list_vm_response[0].state, + "Destroyed", + "Check virtual machine is in destroyed state" + ) return - @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_07_restore_vm(self): - #TODO: SIMENH: add another test the data on the restored VM. + # TODO: SIMENH: add another test the data on the restored VM. 
"""Test recover Virtual Machine """ @@ -540,30 +538,30 @@ class TestVMLifeCycle(cloudstackTestCase): self.apiclient.recoverVirtualMachine(cmd) list_vm_response = VirtualMachine.list( - self.apiclient, - id=self.small_virtual_machine.id - ) + self.apiclient, + id=self.small_virtual_machine.id + ) self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(list_vm_response), - 0, - "Check VM avaliable in List Virtual Machines" - ) + len(list_vm_response), + 0, + "Check VM avaliable in List Virtual Machines" + ) self.assertEqual( - list_vm_response[0].state, - "Stopped", - "Check virtual machine is in Stopped state" - ) + list_vm_response[0].state, + "Stopped", + "Check virtual machine is in Stopped state" + ) return - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg", "multihost"], required_hardware="false") + @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg", "multihost"], required_hardware="false") def test_08_migrate_vm(self): """Test migrate VM """ @@ -591,10 +589,10 @@ class TestVMLifeCycle(cloudstackTestCase): # For XenServer and VMware, migration is possible between hosts belonging to different clusters # with the help of XenMotion and Vmotion respectively. - if self.hypervisor.lower() in ["kvm","simulator"]: - #identify suitable host + if self.hypervisor.lower() in ["kvm", "simulator"]: + # identify suitable host clusters = [h.clusterid for h in hosts] - #find hosts withe same clusterid + # find hosts withe same clusterid clusters = [cluster for index, cluster in enumerate(clusters) if clusters.count(cluster) > 1] if len(clusters) <= 1: @@ -607,8 +605,8 @@ class TestVMLifeCycle(cloudstackTestCase): target_host = suitable_hosts[0] migrate_host = suitable_hosts[1] - #deploy VM on target host - self.vm_to_migrate = VirtualMachine.create( + # deploy VM on target host + vm_to_migrate = VirtualMachine.create( self.apiclient, self.services["small"], accountid=self.account.name, @@ -618,30 +616,30 @@ class TestVMLifeCycle(cloudstackTestCase): hostid=target_host.id ) self.debug("Migrating VM-ID: %s to Host: %s" % ( - self.vm_to_migrate.id, - migrate_host.id - )) + vm_to_migrate.id, + migrate_host.id + )) - self.vm_to_migrate.migrate(self.apiclient, migrate_host.id) + vm_to_migrate.migrate(self.apiclient, migrate_host.id) retries_cnt = 3 - while retries_cnt >=0: + while retries_cnt >= 0: list_vm_response = VirtualMachine.list(self.apiclient, - id=self.vm_to_migrate.id) + id=vm_to_migrate.id) self.assertNotEqual( - list_vm_response, - None, - "Check virtual machine is listed" - ) + list_vm_response, + None, + "Check virtual machine is listed" + ) vm_response = list_vm_response[0] - self.assertEqual(vm_response.id,self.vm_to_migrate.id,"Check virtual machine ID of migrated VM") - self.assertEqual(vm_response.hostid,migrate_host.id,"Check destination hostID of migrated VM") + self.assertEqual(vm_response.id, vm_to_migrate.id, "Check virtual machine ID of migrated VM") + self.assertEqual(vm_response.hostid, migrate_host.id, "Check destination hostID of migrated VM") retries_cnt = retries_cnt - 1 return - @attr(configuration = "expunge.interval") - @attr(configuration = "expunge.delay") - @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + @attr(configuration="expunge.interval") + @attr(configuration="expunge.delay") + @attr(tags=["devcloud", 
"advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_09_expunge_vm(self): """Test destroy(expunge) Virtual Machine """ @@ -655,26 +653,26 @@ class TestVMLifeCycle(cloudstackTestCase): self.apiclient.destroyVirtualMachine(cmd) config = Configurations.list( - self.apiclient, - name='expunge.delay' - ) + self.apiclient, + name='expunge.delay' + ) expunge_delay = int(config[0].value) time.sleep(expunge_delay * 2) - #VM should be destroyed unless expunge thread hasn't run - #Wait for two cycles of the expunge thread + # VM should be destroyed unless expunge thread hasn't run + # Wait for two cycles of the expunge thread config = Configurations.list( - self.apiclient, - name='expunge.interval' - ) + self.apiclient, + name='expunge.interval' + ) expunge_cycle = int(config[0].value) wait_time = expunge_cycle * 4 while wait_time >= 0: list_vm_response = VirtualMachine.list( - self.apiclient, - id=self.small_virtual_machine.id - ) + self.apiclient, + id=self.small_virtual_machine.id + ) if not list_vm_response: break self.debug("Waiting for VM to expunge") @@ -683,10 +681,10 @@ class TestVMLifeCycle(cloudstackTestCase): self.debug("listVirtualMachines response: %s" % list_vm_response) - self.assertEqual(list_vm_response,None,"Check Expunged virtual machine is in listVirtualMachines response") + self.assertEqual(list_vm_response, None, "Check Expunged virtual machine is in listVirtualMachines response") return - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true") + @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true") def test_10_attachAndDetach_iso(self): """Test for attach and detach ISO to virtual machine""" @@ -702,24 +700,24 @@ class TestVMLifeCycle(cloudstackTestCase): self.skipTest("ISOs are not supported on LXC") iso = Iso.create( - self.apiclient, - self.services["iso1"], - account=self.account.name, - domainid=self.account.domainid - ) + self.apiclient, + self.services["iso1"], + account=self.account.name, + domainid=self.account.domainid + ) self.debug("Successfully created ISO with ID: %s" % iso.id) try: iso.download(self.apiclient) except Exception as e: - self.fail("Exception while downloading ISO %s: %s"\ + self.fail("Exception while downloading ISO %s: %s" \ % (iso.id, e)) self.debug("Attach ISO with ID: %s to VM ID: %s" % ( - iso.id, - self.virtual_machine.id - )) - #Attach ISO to virtual machine + iso.id, + self.virtual_machine.id + )) + # Attach ISO to virtual machine cmd = attachIso.attachIsoCmd() cmd.id = iso.id cmd.virtualmachineid = self.virtual_machine.id @@ -729,7 +727,7 @@ class TestVMLifeCycle(cloudstackTestCase): ssh_client = self.virtual_machine.get_ssh_client() except Exception as e: self.fail("SSH failed for virtual machine: %s - %s" % - (self.virtual_machine.ipaddress, e)) + (self.virtual_machine.ipaddress, e)) mount_dir = "/mnt/tmp" cmds = "mkdir -p %s" % mount_dir @@ -750,24 +748,24 @@ class TestVMLifeCycle(cloudstackTestCase): # Get ISO size iso_response = Iso.list( - self.apiclient, - id=iso.id - ) + self.apiclient, + id=iso.id + ) self.assertEqual( - isinstance(iso_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(iso_response, list), + True, + "Check list response returns a valid list" + ) try: - #Unmount ISO + # Unmount ISO command = "umount %s" % mount_dir ssh_client.execute(command) except Exception as e: self.fail("SSH failed for virtual machine: %s - %s" % - (self.virtual_machine.ipaddress, e)) + 
+                      (self.virtual_machine.ipaddress, e))
 
-        #Detach from VM
+        # Detach from VM
         cmd = detachIso.detachIsoCmd()
         cmd.virtualmachineid = self.virtual_machine.id
         self.apiclient.detachIso(cmd)
@@ -776,16 +774,16 @@ class TestVMLifeCycle(cloudstackTestCase):
             res = ssh_client.execute(c)
         except Exception as e:
             self.fail("SSH failed for virtual machine: %s - %s" %
-                      (self.virtual_machine.ipaddress, e))
+                      (self.virtual_machine.ipaddress, e))
 
         # Check if ISO is properly detached from VM (using fdisk)
         result = self.services["mount"] in str(res)
 
         self.assertEqual(
-                         result,
-                         False,
-                         "Check if ISO is detached from virtual machine"
-                         )
+            result,
+            False,
+            "Check if ISO is detached from virtual machine"
+        )
         return
 
     @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@@ -825,6 +823,7 @@ class TestVMLifeCycle(cloudstackTestCase):
         self.assertEqual(Volume.list(self.apiclient, id=vol1.id), None, "List response contains records when it should not")
 
+
 class TestSecuredVmMigration(cloudstackTestCase):
 
     @classmethod
@@ -842,14 +841,15 @@ class TestSecuredVmMigration(cloudstackTestCase):
         domain = get_domain(cls.apiclient)
         cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
         cls.services['mode'] = cls.zone.networktype
-        cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
+        cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][
+            0].__dict__
         cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"]
 
         template = get_template(
-                            cls.apiclient,
-                            cls.zone.id,
-                            cls.services["ostype"]
-                            )
+            cls.apiclient,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
         if template == FAILED:
             assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
@@ -861,20 +861,20 @@ class TestSecuredVmMigration(cloudstackTestCase):
 
         # Create VMs, NAT Rules etc
         cls.account = Account.create(
-                            cls.apiclient,
-                            cls.services["account"],
-                            domainid=domain.id
-                            )
+            cls.apiclient,
+            cls.services["account"],
+            domainid=domain.id
+        )
 
         cls.small_offering = ServiceOffering.create(
-                                    cls.apiclient,
-                                    cls.services["service_offerings"]["small"]
-                                    )
+            cls.apiclient,
+            cls.services["service_offerings"]["small"]
+        )
 
         cls._cleanup = [
-                        cls.small_offering,
-                        cls.account
-                        ]
+            cls.small_offering,
+            cls.account
+        ]
 
     @classmethod
     def tearDownClass(cls):
@@ -916,19 +916,20 @@ class TestSecuredVmMigration(cloudstackTestCase):
         target_hosts = Host.listForMigration(self.apiclient,
                                              virtualmachineid=virtualmachineid)
 
         for host in target_hosts:
-            h = list_hosts(self.apiclient,type='Routing', id=host.id)[0]
+            h = list_hosts(self.apiclient, type='Routing', id=host.id)[0]
             if h.details.secured == secured:
                 return h
 
         cloudstackTestCase.skipTest(self, "No target hosts available, skipping test.")
 
     def check_migration_protocol(self, protocol, host):
-        resp = SshClient(host.ipaddress, port=22, user=self.hostConfig["username"],passwd=self.hostConfig["password"])\
+        resp = SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"]) \
             .execute("grep -a listen_%s=1 /etc/libvirt/libvirtd.conf | tail -1" % protocol)
 
         if protocol not in resp[0]:
             cloudstackTestCase.fail(self, "Libvirt listen protocol expected: '" + protocol + "\n"
-                                          "does not match actual: " + resp[0])
+                                          "does not match actual: " +
+                                          resp[0])
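check_migration_protocol() above keys off the listen_tls / listen_tcp switches in the host's libvirt daemon configuration. For reference, the two states it greps for would look roughly like this in /etc/libvirt/libvirtd.conf (assumed file contents matching the sed commands in unsecure_host() below, shown for illustration only, not part of this patch):

    # secured host: TLS listener on, plain TCP listener off
    listen_tls=1
    listen_tcp=0

    # unsecured host: TLS listener off, plain TCP listener on
    listen_tls=0
    listen_tcp=1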
 
     def migrate_and_check(self, vm, src_host, dest_host, proto='tls'):
         """
@@ -940,7 +941,7 @@ class TestSecuredVmMigration(cloudstackTestCase):
         self.assertEqual(vm_response.hostid, dest_host.id, "Check destination host ID of migrated VM")
 
     def unsecure_host(self, host):
-        SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"])\
+        SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"]) \
             .execute("rm -f /etc/cloudstack/agent/cloud* && \
                       sed -i 's/listen_tls.*/listen_tls=0/g' /etc/libvirt/libvirtd.conf && \
                       sed -i 's/listen_tcp.*/listen_tcp=1/g' /etc/libvirt/libvirtd.conf && \
@@ -1051,7 +1052,8 @@ class TestSecuredVmMigration(cloudstackTestCase):
             self.migrate_and_check(vm, secure_host, unsecure_host, proto='tls')
         except Exception:
             pass
-        else: self.fail("Migration succeeded, instead it should fail")
+        else:
+            self.fail("Migration succeeded when it should have failed")
 
     @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
     def test_04_nonsecured_to_secured_vm_migration(self):
@@ -1072,5 +1074,217 @@ class TestSecuredVmMigration(cloudstackTestCase):
             self.migrate_and_check(vm, unsecure_host, secure_host, proto='tcp')
         except Exception:
             pass
-        else: self.fail("Migration succeeded, instead it should fail")
+        else:
+            self.fail("Migration succeeded when it should have failed")
+
+
+class TestMigrateVMwithVolume(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        testClient = super(TestMigrateVMwithVolume, cls).getClsTestClient()
+        cls.apiclient = testClient.getApiClient()
+        cls.services = testClient.getParsedTestDataConfig()
+        cls.hypervisor = testClient.getHypervisorInfo()
+        cls._cleanup = []
+
+        # Get Zone, Domain and templates
+        domain = get_domain(cls.apiclient)
+        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
+        cls.services['mode'] = cls.zone.networktype
+        cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][
+            0].__dict__
+        cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"]
+
+        template = get_template(
+            cls.apiclient,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
+        if template == FAILED:
+            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
+
+        # Set Zones and disk offerings
+        cls.services["small"]["zoneid"] = cls.zone.id
+        cls.services["small"]["template"] = template.id
+
+        cls.services["iso1"]["zoneid"] = cls.zone.id
+
+        # Create VMs, NAT Rules etc
+        cls.account = Account.create(
+            cls.apiclient,
+            cls.services["account"],
+            domainid=domain.id
+        )
+
+        cls.small_offering = ServiceOffering.create(
+            cls.apiclient,
+            cls.services["service_offerings"]["small"]
+        )
+
+        cls._cleanup = [
+            cls.small_offering,
+            cls.account
+        ]
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.apiclient = super(TestMigrateVMwithVolume, cls).getClsTestClient().getApiClient()
+        try:
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+
+        if self.hypervisor.lower() not in ["vmware"]:
+            self.skipTest("VM migration with volumes is only supported on VMware")
+
+        self.hosts = Host.list(
+            self.apiclient,
+            zoneid=self.zone.id,
+            type='Routing',
+            hypervisor='VMware')
+
+        if len(self.hosts) < 2:
+            self.skipTest("Requires at least two hosts for performing migration related tests")
+
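marvin's apiclient normally blocks on the async migrateVirtualMachine job itself, so the tests below can assert on the result directly. If a test ever needed to poll a job by hand, a sketch along these lines could work (this assumes marvin's generated queryAsyncJobResult command module and the standard jobstatus codes; wait_for_job is a hypothetical helper, not part of this patch):

    import time
    from marvin.cloudstackAPI import queryAsyncJobResult

    def wait_for_job(apiclient, jobid, timeout=300, interval=10):
        # Poll the async job until it leaves the pending state (jobstatus 0).
        while timeout > 0:
            cmd = queryAsyncJobResult.queryAsyncJobResultCmd()
            cmd.jobid = jobid
            result = apiclient.queryAsyncJobResult(cmd)
            if result.jobstatus != 0:
                return result  # 1 = succeeded, 2 = failed
            time.sleep(interval)
            timeout -= interval
        raise AssertionError("Async job %s did not finish in time" % jobid)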
+    def tearDown(self):
+        try:
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+
+    def get_target_host(self, virtualmachineid):
+        target_hosts = Host.listForMigration(self.apiclient,
+                                             virtualmachineid=virtualmachineid)
+        if len(target_hosts) < 1:
+            self.skipTest("No target hosts found")
+
+        return target_hosts[0]
+
+    def get_target_pool(self, volid):
+        target_pools = StoragePool.listForMigration(self.apiclient, id=volid)
+
+        if len(target_pools) < 1:
+            self.skipTest("Not enough storage pools found")
+
+        return target_pools[0]
+
+    def get_vm_volumes(self, id):
+        return Volume.list(self.apiclient, virtualmachineid=id, listall=True)
+
+    def deploy_vm(self):
+        return VirtualMachine.create(
+            self.apiclient,
+            self.services["small"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.small_offering.id,
+            mode=self.services["mode"])
+
+    def migrate_vm_with_pools(self, target_pool, id):
+        cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
+
+        cmd.storageid = target_pool.id
+        cmd.virtualmachineid = id
+
+        return self.apiclient.migrateVirtualMachine(cmd)
+
+    def create_volume(self):
+        small_disk_offering = DiskOffering.list(self.apiclient, name='Small')[0]
+
+        return Volume.create(
+            self.apiclient,
+            self.services,
+            account=self.account.name,
+            diskofferingid=small_disk_offering.id,
+            domainid=self.account.domainid,
+            zoneid=self.zone.id
+        )
+
+    """
+    BVT for Vmware Offline VM and Volume Migration
+    """
+
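Taken together, the helpers above make up the offline migrate-with-storage flow that test_01 below exercises end to end. The intended call order is roughly the following (an illustrative sketch only, reusing the fixtures from setUpClass):

    vm = self.deploy_vm()
    root_volume = self.get_vm_volumes(vm.id)[0]
    target_pool = self.get_target_pool(root_volume.id)
    vm.stop(self.apiclient)  # offline migration expects a stopped VM on VMware
    self.migrate_vm_with_pools(target_pool, vm.id)
    # after the job completes, the volume should report the target pool's id
    assert self.get_vm_volumes(vm.id)[0].storageid == target_pool.id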
+    @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
+    def test_01_migrate_VM_and_root_volume(self):
+        """Test VM will be migrated with its root volume"""
+        # Validate the following
+        # 1. Deploys a VM
+        # 2. Finds suitable host for migration
+        # 3. Finds suitable storage pool for root volume
+        # 4. Migrate the VM to new host and storage pool and assert migration successful
+
+        vm = self.deploy_vm()
+
+        root_volume = self.get_vm_volumes(vm.id)[0]
+
+        target_pool = self.get_target_pool(root_volume.id)
+
+        vm.stop(self.apiclient)
+
+        self.migrate_vm_with_pools(target_pool, vm.id)
+
+        root_volume = self.get_vm_volumes(vm.id)[0]
+        self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected")
+
+    @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
+    def test_02_migrate_VM_with_two_data_disks(self):
+        """Test VM will be migrated with its root volume and two data disks"""
+        # Validate the following
+        # 1. Deploys a VM and attaches 2 data disks
+        # 2. Finds suitable host for migration
+        # 3. Finds suitable storage pool for volumes
+        # 4. Migrate the VM to new host and storage pool and assert migration successful
+
+        vm = self.deploy_vm()
+
+        volume1 = self.create_volume()
+        volume2 = self.create_volume()
+
+        vm.attach_volume(self.apiclient, volume1)
+        vm.attach_volume(self.apiclient, volume2)
+
+        root_volume = self.get_vm_volumes(vm.id)[0]
+
+        target_pool = self.get_target_pool(root_volume.id)
+
+        vm.stop(self.apiclient)
+
+        self.migrate_vm_with_pools(target_pool, vm.id)
+
+        volume1 = Volume.list(self.apiclient, id=volume1.id)[0]
+        volume2 = Volume.list(self.apiclient, id=volume2.id)[0]
+        root_volume = self.get_vm_volumes(vm.id)[0]
+
+        self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected")
+        self.assertEqual(volume1.storageid, target_pool.id, "Pool ID was not as expected")
+        self.assertEqual(volume2.storageid, target_pool.id, "Pool ID was not as expected")
+
+    @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
+    def test_03_migrate_detached_volume(self):
+        """Test migration of a detached volume to another storage pool"""
+        # Validate the following
+        # 1. Deploys a VM and attaches 1 data disk
+        # 2. Detaches the Disk
+        # 3. Finds suitable storage pool for the Disk
+        # 4. Migrate the volume to the new storage pool and assert migration successful
+
+        vm = self.deploy_vm()
+
+        volume1 = self.create_volume()
+
+        vm.attach_volume(self.apiclient, volume1)
+        vm.detach_volume(self.apiclient, volume1)
+
+        target_pool = self.get_target_pool(volume1.id)
+
+        Volume.migrate(self.apiclient, storageid=target_pool.id, volumeid=volume1.id)
+
+        vol = Volume.list(self.apiclient, id=volume1.id)[0]
+
+        self.assertEqual(vol.storageid, target_pool.id, "Storage pool was not the same as expected")
diff --git a/utils/src/main/java/com/cloud/utils/StringUtils.java b/utils/src/main/java/com/cloud/utils/StringUtils.java
index 4c1dacbddf5..e858bee74a0 100644
--- a/utils/src/main/java/com/cloud/utils/StringUtils.java
+++ b/utils/src/main/java/com/cloud/utils/StringUtils.java
@@ -73,6 +73,14 @@ public class StringUtils {
     public static String join(final String delimiter, final Object... components) {
         return org.apache.commons.lang.StringUtils.join(components, delimiter);
     }
 
+    /**
+     * @deprecated
+     * Please use org.apache.commons.lang.StringUtils.isBlank() as a replacement
+     */
+    @Deprecated
+    public static boolean isBlank(String str) {
+        return org.apache.commons.lang.StringUtils.isBlank(str);
+    }
 
     /**
      * @deprecated
diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index 0deb2dc29e6..52ead5c8535 100644
--- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -447,6 +447,23 @@ public class VirtualMachineMO extends BaseMO {
         return false;
     }
 
+    public boolean changeDatastore(ManagedObjectReference morDataStore) throws Exception {
+        VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
+        relocateSpec.setDatastore(morDataStore);
+
+        ManagedObjectReference morTask = _context.getService().relocateVMTask(_mor, relocateSpec, null);
+
+        boolean result = _context.getVimClient().waitForTask(morTask);
+        if (result) {
+            _context.waitForTaskProgressDone(morTask);
+            return true;
+        } else {
+            s_logger.error("VMware change datastore relocateVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask));
+        }
+
+        return false;
+    }
+
     public boolean relocate(ManagedObjectReference morTargetHost) throws Exception {
         VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
         relocateSpec.setHost(morTargetHost);
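The new changeDatastore() helper wraps the vSphere RelocateVM_Task with only the datastore field of the relocate spec set, which is exactly what an offline storage migration needs. Caller-side usage would look roughly like this (a hedged sketch: vmMo, morTargetDs and vmName are assumed context, not identifiers introduced by this patch):

    // Illustrative only: relocate a stopped VM's disks to another datastore.
    // morTargetDs is the destination datastore's ManagedObjectReference,
    // resolved beforehand (e.g. from the target primary storage pool).
    if (!vmMo.changeDatastore(morTargetDs)) {
        // changeDatastore() has already logged the task failure detail.
        throw new CloudRuntimeException("Change datastore failed for VM " + vmName);
    }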