Mirror of https://github.com/apache/cloudstack.git, synced 2025-10-26 08:42:29 +01:00
Vmware offline migration (#2848)
* Offline VM and volume migration on VMware hypervisor hosts; also add a VM disk consolidation call on successful VM migrations
* Fix indentation of the Marvin test file and reformat it against PEP 8
* Fix a few comment typos
* Refactor debug messages to use String.format() when the debug log level is enabled
* Send the list of commands returned by the hypervisor Guru instead of explicitly selecting the first one
* Fix an unhandled NPE during VM migration
* Revert to distinct event descriptions for VM migration to a host or to a storage pool
* Reformat the test_primary_storage file against PEP 8 and remove unused imports
* Restore the deprecation messages in the custom StringUtils class to favour the use of the Apache utils
This commit is contained in:
parent
d68712eb7b
commit
b363fd49f7
HypervisorGuru.java:

@@ -19,6 +19,7 @@ package com.cloud.hypervisor;
 import java.util.List;
 import java.util.Map;
 
+import com.cloud.storage.StoragePool;
 import org.apache.cloudstack.framework.config.ConfigKey;
 
 import com.cloud.agent.api.Command;
@@ -32,7 +33,7 @@ import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineProfile;
 
 public interface HypervisorGuru extends Adapter {
-    static final ConfigKey<Boolean> VmwareFullClone = new ConfigKey<Boolean>("Advanced", Boolean.class, "vmware.create.full.clone", "true",
+    ConfigKey<Boolean> VmwareFullClone = new ConfigKey<Boolean>("Advanced", Boolean.class, "vmware.create.full.clone", "true",
             "If set to true, creates guest VMs as full clones on ESX", false);
     HypervisorType getHypervisorType();
 
@@ -84,4 +85,13 @@ public interface HypervisorGuru extends Adapter {
     List<Command> finalizeExpungeVolumes(VirtualMachine vm);
 
     Map<String, String> getClusterSettings(long vmId);
 
+    /**
+     * Will generate commands to migrate a vm to a pool. For now this will only work for stopped VMs on Vmware.
+     *
+     * @param vm the stopped vm to migrate
+     * @param destination the primary storage pool to migrate to
+     * @return a list of commands to perform for a successful migration
+     */
+    List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination);
 }
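For context, a guru that supports offline migration implements this new hook by returning the agent commands to execute. The sketch below shows the rough shape only; it is not the committed VMwareGuru code, and the volume-collection helper and the pool identifier handed to the command are assumptions, but the two command classes are the ones this commit introduces or extends.

    // Hypothetical finalizeMigrate implementation for a VMware-style guru.
    @Override
    public List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination) {
        List<Command> commands = new ArrayList<>();
        // Ask the hypervisor to relocate the VM's disks to the destination pool;
        // the volume collection is used to report back VMware-generated names.
        List<VolumeTO> volumes = collectVolumes(vm); // hypothetical helper
        commands.add(new MigrateVmToPoolCommand(vm.getInstanceName(), volumes, destination.getUuid(), true));
        // Afterwards, unregister the stale VM on the source side, serialized
        // with the migration itself (see UnregisterVMCommand below).
        UnregisterVMCommand cleanup = new UnregisterVMCommand(vm.getInstanceName(), true);
        cleanup.setCleanupVmFiles(true);
        commands.add(cleanup);
        return commands;
    }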
VolumeApiService.java:

@@ -29,11 +29,21 @@ import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;
 import org.apache.cloudstack.api.response.GetUploadParamsResponse;
+import org.apache.cloudstack.framework.config.ConfigKey;
 
 import com.cloud.exception.ResourceAllocationException;
 import com.cloud.user.Account;
 
 public interface VolumeApiService {
+
+    ConfigKey<Long> ConcurrentMigrationsThresholdPerDatastore = new ConfigKey<Long>("Advanced"
+            , Long.class
+            , "concurrent.migrations.per.target.datastore"
+            , "0"
+            , "Limits number of migrations that can be handled per datastore concurrently; default is 0 - unlimited"
+            , true // not sure if this is to be dynamic
+            , ConfigKey.Scope.Global);
+
     /**
      * Creates the database object for a volume based on the given criteria
      *
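The new global setting is consumed at runtime through the ConfigKey handle rather than a database lookup. A minimal sketch of the guard this enables (the commit's own enforcement, checkConcurrentJobsPerDatastoreThreshhold, appears further down in VirtualMachineManagerImpl and VolumeOrchestrator; the pending-count helper here is hypothetical):

    // Read the current value of concurrent.migrations.per.target.datastore;
    // 0 (the default) means unlimited.
    final Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
    if (threshold != null && threshold > 0 && pendingMigrationsTo(destPool) > threshold) {
        throw new CloudRuntimeException("Too many concurrent migrations to datastore " + destPool.getUuid());
    }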
BaseAsyncCmd.java:

@@ -27,6 +27,7 @@ public abstract class BaseAsyncCmd extends BaseCmd {
     public static final String ipAddressSyncObject = "ipaddress";
     public static final String networkSyncObject = "network";
     public static final String vpcSyncObject = "vpc";
+    public static final String migrationSyncObject = "migration";
     public static final String snapshotHostSyncObject = "snapshothost";
     public static final String gslbSyncObject = "globalserverloadbalancer";
     private static final Logger s_logger = Logger.getLogger(BaseAsyncCmd.class.getName());
MigrateVMCmd.java:

@@ -43,10 +43,10 @@ import com.cloud.uservm.UserVm;
 import com.cloud.vm.VirtualMachine;
 
 @APICommand(name = "migrateVirtualMachine",
         description = "Attempts Migration of a VM to a different host or Root volume of the vm to a different storage pool",
         responseObject = UserVmResponse.class, entityType = {VirtualMachine.class},
         requestHasSensitiveInfo = false,
         responseHasSensitiveInfo = true)
 public class MigrateVMCmd extends BaseAsyncCmd {
     public static final Logger s_logger = Logger.getLogger(MigrateVMCmd.class.getName());
 
@@ -57,24 +57,24 @@ public class MigrateVMCmd extends BaseAsyncCmd {
     /////////////////////////////////////////////////////
 
     @Parameter(name = ApiConstants.HOST_ID,
             type = CommandType.UUID,
             entityType = HostResponse.class,
             required = false,
             description = "Destination Host ID to migrate VM to. Required for live migrating a VM from host to host")
     private Long hostId;
 
     @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID,
             type = CommandType.UUID,
             entityType = UserVmResponse.class,
             required = true,
             description = "the ID of the virtual machine")
     private Long virtualMachineId;
 
     @Parameter(name = ApiConstants.STORAGE_ID,
             type = CommandType.UUID,
             entityType = StoragePoolResponse.class,
             required = false,
             description = "Destination storage pool ID to migrate VM volumes to. Required for migrating the root disk volume")
     private Long storageId;
 
     /////////////////////////////////////////////////////
@@ -119,13 +119,15 @@ public class MigrateVMCmd extends BaseAsyncCmd {
 
     @Override
     public String getEventDescription() {
+        String eventDescription;
         if (getHostId() != null) {
-            return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to host Id: " + this._uuidMgr.getUuid(Host.class, getHostId());
+            eventDescription = String.format("Attempting to migrate VM id: %s to host Id: %s", getVirtualMachineId(), getHostId());
         } else if (getStoragePoolId() != null) {
-            return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to storage pool Id: " + this._uuidMgr.getUuid(StoragePool.class, getStoragePoolId());
+            eventDescription = String.format("Attempting to migrate VM id: %s to storage pool Id: %s", getVirtualMachineId(), getStoragePoolId());
         } else {
-            return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId());
+            eventDescription = String.format("Attempting to migrate VM id: %s", getVirtualMachineId());
         }
+        return eventDescription;
     }
 
     @Override
@@ -152,16 +154,17 @@ public class MigrateVMCmd extends BaseAsyncCmd {
         if (destinationHost.getType() != Host.Type.Routing) {
             throw new InvalidParameterValueException("The specified host(" + destinationHost.getName() + ") is not suitable to migrate the VM, please specify another one");
         }
-        CallContext.current().setEventDetails("VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + ((getHostId() != null) ? " to host Id: " + this._uuidMgr.getUuid(Host.class, getHostId()) : "" ));
+        CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to host Id: " + getHostId());
     }
 
+    // OfflineMigration performed when this parameter is specified
     StoragePool destStoragePool = null;
     if (getStoragePoolId() != null) {
         destStoragePool = _storageService.getStoragePool(getStoragePoolId());
         if (destStoragePool == null) {
             throw new InvalidParameterValueException("Unable to find the storage pool to migrate the VM");
         }
-        CallContext.current().setEventDetails("VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to storage pool Id: " + this._uuidMgr.getUuid(StoragePool.class, getStoragePoolId()));
+        CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to storage pool Id: " + getStoragePoolId());
     }
 
     try {
@@ -172,7 +175,7 @@ public class MigrateVMCmd extends BaseAsyncCmd {
             migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool);
         }
         if (migratedVm != null) {
-            UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm)migratedVm).get(0);
+            UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm) migratedVm).get(0);
             response.setResponseName(getCommandName());
             setResponseObject(response);
         } else {
@@ -181,15 +184,27 @@ public class MigrateVMCmd extends BaseAsyncCmd {
         } catch (ResourceUnavailableException ex) {
             s_logger.warn("Exception: ", ex);
             throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
-        } catch (ConcurrentOperationException e) {
-            s_logger.warn("Exception: ", e);
-            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
-        } catch (ManagementServerException e) {
-            s_logger.warn("Exception: ", e);
-            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
-        } catch (VirtualMachineMigrationException e) {
+        } catch (VirtualMachineMigrationException | ConcurrentOperationException | ManagementServerException e) {
             s_logger.warn("Exception: ", e);
             throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
         }
     }
 
+    @Override
+    public String getSyncObjType() {
+        return (getSyncObjId() != null) ? BaseAsyncCmd.migrationSyncObject : null;
+    }
+
+    @Override
+    public Long getSyncObjId() {
+        if (getStoragePoolId() != null) {
+            return getStoragePoolId();
+        }
+        // OfflineVmwareMigrations: undocumented feature;
+        // OfflineVmwareMigrations: on implementing a maximum queue size for per storage migrations it seems counter intuitive for the user to not enforce it for hosts as well.
+        if (getHostId() != null) {
+            return getHostId();
+        }
+        return null;
+    }
 }
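These two overrides opt migrateVirtualMachine jobs into the async job framework's sync-object queue: jobs reporting the same (type, id) pair are serialized instead of running concurrently, which is what makes the per-datastore throttling added later in this commit meaningful. An illustration of the resolution order the code implements (test-style sketch; the command factory is hypothetical):

    // The destination storage pool id wins; the host id is the fallback.
    MigrateVMCmd cmd = newMigrateVMCmd(/* storageId */ 7L, /* hostId */ 3L); // hypothetical factory
    assert cmd.getSyncObjId() == 7L;
    assert BaseAsyncCmd.migrationSyncObject.equals(cmd.getSyncObjType());

    MigrateVMCmd hostOnly = newMigrateVMCmd(null, 3L);
    assert hostOnly.getSyncObjId() == 3L;

    MigrateVMCmd neither = newMigrateVMCmd(null, null);
    assert neither.getSyncObjType() == null; // no sync object, no queueing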
MigrateVirtualMachineWithVolumeCmd.java:

@@ -46,7 +46,7 @@ import com.cloud.vm.VirtualMachine;
 
 @APICommand(name = "migrateVirtualMachineWithVolume",
         description = "Attempts Migration of a VM with its volumes to a different host",
         responseObject = UserVmResponse.class, entityType = {VirtualMachine.class},
         requestHasSensitiveInfo = false,
         responseHasSensitiveInfo = true)
 public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd {
@@ -147,6 +147,7 @@ public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd {
         }
 
         Host destinationHost = _resourceService.getHost(getHostId());
+        // OfflineVmwareMigration: destination host would have to not be a required parameter for stopped VMs
         if (destinationHost == null) {
             throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id =" + getHostId());
         }
@@ -163,13 +164,7 @@ public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd {
         } catch (ResourceUnavailableException ex) {
             s_logger.warn("Exception: ", ex);
             throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
-        } catch (ConcurrentOperationException e) {
-            s_logger.warn("Exception: ", e);
-            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
-        } catch (ManagementServerException e) {
-            s_logger.warn("Exception: ", e);
-            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
-        } catch (VirtualMachineMigrationException e) {
+        } catch (ConcurrentOperationException | ManagementServerException | VirtualMachineMigrationException e) {
             s_logger.warn("Exception: ", e);
             throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
         }
MigrateVolumeCmd.java:

@@ -120,4 +120,16 @@ public class MigrateVolumeCmd extends BaseAsyncCmd {
         }
     }
 
+    @Override
+    public String getSyncObjType() {
+        return (getSyncObjId() != null) ? BaseAsyncCmd.migrationSyncObject : null;
+    }
+
+    @Override
+    public Long getSyncObjId() {
+        if (getStoragePoolId() != null) {
+            return getStoragePoolId();
+        }
+        return null;
+    }
 }
MigrateVmToPoolAnswer.java (new file):

@@ -0,0 +1,43 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+
+import java.util.List;
+
+public class MigrateVmToPoolAnswer extends Answer {
+
+    List<VolumeObjectTO> volumeTos;
+
+    public MigrateVmToPoolAnswer(MigrateVmToPoolCommand cmd, Exception ex) {
+        super(cmd, ex);
+        volumeTos = null;
+    }
+
+    public MigrateVmToPoolAnswer(MigrateVmToPoolCommand cmd, List<VolumeObjectTO> volumeTos) {
+        super(cmd, true, null);
+        this.volumeTos = volumeTos;
+    }
+
+    public List<VolumeObjectTO> getVolumeTos() {
+        return volumeTos;
+    }
+}
MigrateVmToPoolCommand.java (new file):

@@ -0,0 +1,70 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+import com.cloud.agent.api.to.VolumeTO;
+
+import java.util.Collection;
+
+/**
+ * used to tell the agent to migrate a vm to a different primary storage pool.
+ * It is for now only implemented on Vmware and is supposed to work irrespective of whether the VM is started or not.
+ *
+ */
+public class MigrateVmToPoolCommand extends Command {
+    private Collection<VolumeTO> volumes;
+    private String vmName;
+    private String destinationPool;
+    private boolean executeInSequence = false;
+
+    protected MigrateVmToPoolCommand() {
+    }
+
+    /**
+     *
+     * @param vmName the name of the VM to migrate
+     * @param volumes used to supply feedback on vmware generated names
+     * @param destinationPool the primary storage pool to migrate the VM to
+     * @param executeInSequence
+     */
+    public MigrateVmToPoolCommand(String vmName, Collection<VolumeTO> volumes, String destinationPool, boolean executeInSequence) {
+        this.vmName = vmName;
+        this.volumes = volumes;
+        this.destinationPool = destinationPool;
+        this.executeInSequence = executeInSequence;
+    }
+
+    public Collection<VolumeTO> getVolumes() {
+        return volumes;
+    }
+
+    public String getDestinationPool() {
+        return destinationPool;
+    }
+
+    public String getVmName() {
+        return vmName;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return executeInSequence;
+    }
+
+}
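Together these two classes give the management server a full round trip for VMware offline migration: the command carries the VM name, its volumes and the destination pool down to the agent, and the answer carries back the relocated disks as VolumeObjectTOs (consumed by markVolumesInPool further down in this commit). A sketch of the agent-side handling, under the assumption that the resource delegates the actual relocation to vCenter (relocateVm is hypothetical):

    // Hypothetical agent-side servicing of the new command.
    public Answer execute(MigrateVmToPoolCommand cmd) {
        try {
            List<VolumeObjectTO> movedDisks = relocateVm(cmd.getVmName(), cmd.getDestinationPool());
            return new MigrateVmToPoolAnswer(cmd, movedDisks); // success: report new disk paths
        } catch (Exception e) {
            return new MigrateVmToPoolAnswer(cmd, e);          // failure: wrap the exception
        }
    }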
UnregisterVMCommand.java:

@@ -22,14 +22,19 @@ package com.cloud.agent.api;
 public class UnregisterVMCommand extends Command {
     String vmName;
     boolean cleanupVmFiles = false;
+    boolean executeInSequence;
 
     public UnregisterVMCommand(String vmName) {
+        this(vmName, false);
+    }
+    public UnregisterVMCommand(String vmName, boolean executeInSequence) {
         this.vmName = vmName;
+        this.executeInSequence = executeInSequence;
     }
 
     @Override
     public boolean executeInSequence() {
-        return false;
+        return executeInSequence;
     }
 
     public String getVmName() {
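Callers that unregister a VM as one step of a longer migration sequence can now request serialized execution instead of the previous hard-coded false:

    // Unregister the stale source VM and delete its files, without overlapping
    // other commands in the same migration sequence.
    UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName(), true);
    uvc.setCleanupVmFiles(true);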
MigrateVolumeCommand.java:

@@ -31,6 +31,7 @@ public class MigrateVolumeCommand extends Command {
     long volumeId;
     String volumePath;
     StorageFilerTO pool;
+    StorageFilerTO sourcePool;
     String attachedVmName;
     Volume.Type volumeType;
 
@@ -47,14 +48,17 @@ public class MigrateVolumeCommand extends Command {
     }
 
     public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool pool, String attachedVmName, Volume.Type volumeType, int timeout) {
-        this.volumeId = volumeId;
-        this.volumePath = volumePath;
-        this.pool = new StorageFilerTO(pool);
+        this(volumeId,volumePath,pool,timeout);
         this.attachedVmName = attachedVmName;
         this.volumeType = volumeType;
         this.setWait(timeout);
     }
 
+    public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool) {
+        this(volumeId,volumePath,targetPool, null, Volume.Type.UNKNOWN, -1);
+        this.sourcePool = new StorageFilerTO(sourcePool);
+    }
+
     public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map<String, String> srcDetails, Map<String, String> destDetails, int timeout) {
         this.srcData = srcData;
         this.destData = destData;
@@ -81,6 +85,14 @@ public class MigrateVolumeCommand extends Command {
         return pool;
     }
 
+    public StorageFilerTO getSourcePool() {
+        return sourcePool;
+    }
+
+    public StorageFilerTO getTargetPool() {
+        return pool;
+    }
+
     public String getAttachedVmName() {
         return attachedVmName;
     }
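The new four-argument constructor records both ends of an offline volume move while reusing the existing delegation chain (no attached VM, Volume.Type.UNKNOWN, no wait timeout):

    // Sketch: build an offline move and read back both filers.
    MigrateVolumeCommand cmd = new MigrateVolumeCommand(volume.getId(), volume.getPath(), srcPool, destPool);
    StorageFilerTO from = cmd.getSourcePool(); // new in this commit
    StorageFilerTO to = cmd.getTargetPool();   // alias for the pre-existing destination filer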
DataMotionStrategy.java:

@@ -25,11 +25,28 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.host.Host;
 
+/**
+ * Interface to query how to move data around and to commision the moving
+ */
 public interface DataMotionStrategy {
+    /**
+     * Reports whether this instance can do a move from source to destination
+     * @param srcData object to move
+     * @param destData location to move it to
+     * @return the expertise level with which this instance knows how to handle the move
+     */
     StrategyPriority canHandle(DataObject srcData, DataObject destData);
 
     StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost);
 
+    /**
+     * Copy the source volume to its destination (on a host if not null)
+     *
+     * @param srcData volume to move
+     * @param destData volume description as intended after the move
+     * @param destHost if not null destData should be reachable from here
+     * @param callback where to report completion or failure to
+     */
     void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback);
 
     void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback);
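A fragment of what an implementor owes the contract documented above: canHandle reports an expertise level, and copyAsync must always answer the callback, on failure as well as success. This is a sketch under assumptions; the worker method is hypothetical, and the CopyCommandResult construction should be checked against the class in your tree.

    @Override
    public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
            AsyncCompletionCallback<CopyCommandResult> callback) {
        CopyCommandResult result;
        try {
            Answer answer = performCopy(srcData, destData, destHost); // hypothetical worker
            result = new CopyCommandResult(null, answer);
        } catch (Exception e) {
            result = new CopyCommandResult(null, null);
            result.setResult(e.getMessage()); // reports the failure to the orchestrator
        }
        callback.complete(result);
    }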
StorageManager.java:

@@ -106,7 +106,14 @@ public interface StorageManager extends StorageService {
      * @param poolId
      * @return comma separated list of tags
      */
-    public String getStoragePoolTags(long poolId);
+    String getStoragePoolTags(long poolId);
+
+    /**
+     * Returns a list of Strings with tags for the specified storage pool
+     * @param poolId
+     * @return comma separated list of tags
+     */
+    List<String> getStoragePoolTagList(long poolId);
 
     Answer sendToPool(long poolId, Command cmd) throws StorageUnavailableException;
VirtualMachineManagerImpl.java:

@@ -41,6 +41,9 @@ import javax.naming.ConfigurationException;
 
 import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
+import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
+import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin;
+import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
 import org.apache.cloudstack.ca.CAManager;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
@@ -86,6 +89,7 @@ import com.cloud.agent.api.ClusterVMMetaDataSyncAnswer;
 import com.cloud.agent.api.ClusterVMMetaDataSyncCommand;
 import com.cloud.agent.api.Command;
 import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.MigrateVmToPoolAnswer;
 import com.cloud.agent.api.ModifyTargetsCommand;
 import com.cloud.agent.api.PingRoutingCommand;
 import com.cloud.agent.api.PlugNicAnswer;
@@ -138,10 +142,8 @@ import com.cloud.exception.AffinityConflictException;
 import com.cloud.exception.AgentUnavailableException;
 import com.cloud.exception.ConcurrentOperationException;
 import com.cloud.exception.ConnectionException;
-import com.cloud.exception.InsufficientAddressCapacityException;
 import com.cloud.exception.InsufficientCapacityException;
 import com.cloud.exception.InsufficientServerCapacityException;
-import com.cloud.exception.InsufficientVirtualNetworkCapacityException;
 import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.exception.OperationTimedoutException;
 import com.cloud.exception.ResourceUnavailableException;
@@ -171,10 +173,12 @@ import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.storage.DiskOfferingVO;
 import com.cloud.storage.ScopeType;
 import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.Volume;
 import com.cloud.storage.Volume.Type;
+import com.cloud.storage.VolumeApiService;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.DiskOfferingDao;
 import com.cloud.storage.dao.GuestOSCategoryDao;
@@ -314,6 +318,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager
     private VmWorkJobDao _workJobDao;
     @Inject
     private AsyncJobManager _jobMgr;
+    @Inject
+    private StorageManager storageMgr;
 
     VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);
 
@@ -1820,14 +1826,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager
     protected boolean stateTransitTo(final VMInstanceVO vm, final VirtualMachine.Event e, final Long hostId, final String reservationId) throws NoTransitionException {
         // if there are active vm snapshots task, state change is not allowed
 
-        // Disable this hacking thing, VM snapshot task need to be managed by its orchestartion flow istelf instead of
-        // hacking it here at general VM manager
-        /*
-        if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) {
-            s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
-            return false;
-        }
-        */
         vm.setReservationId(reservationId);
         return _stateMachine.transitTo(vm, e, new Pair<Long, Long>(vm.getHostId(), hostId), _vmDao);
     }
@@ -1836,15 +1834,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager
     public boolean stateTransitTo(final VirtualMachine vm1, final VirtualMachine.Event e, final Long hostId) throws NoTransitionException {
         final VMInstanceVO vm = (VMInstanceVO)vm1;
 
-        /*
-         * Remove the hacking logic here.
-        // if there are active vm snapshots task, state change is not allowed
-        if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) {
-            s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
-            return false;
-        }
-        */
-
         final State oldState = vm.getState();
         if (oldState == State.Starting) {
             if (e == Event.OperationSucceeded) {
@@ -1988,89 +1977,243 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager
     private void orchestrateStorageMigration(final String vmUuid, final StoragePool destPool) {
         final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
 
+        preStorageMigrationStateCheck(destPool, vm);
+
+        try {
+            if(s_logger.isDebugEnabled()) {
+                s_logger.debug(String.format("Offline migration of %s vm %s with volumes",
+                        vm.getHypervisorType().toString(),
+                        vm.getInstanceName()));
+            }
+
+            migrateThroughHypervisorOrStorage(destPool, vm);
+
+        } catch (ConcurrentOperationException
+                | InsufficientCapacityException // possibly InsufficientVirtualNetworkCapacityException or InsufficientAddressCapacityException
+                | StorageUnavailableException e) {
+            String msg = String.format("Failed to migrate VM: %s", vmUuid);
+            s_logger.debug(msg);
+            throw new CloudRuntimeException(msg, e);
+        } finally {
+            try {
+                stateTransitTo(vm, Event.AgentReportStopped, null);
+            } catch (final NoTransitionException e) {
+                String anotherMEssage = String.format("failed to change vm state of VM: %s", vmUuid);
+                s_logger.debug(anotherMEssage);
+                throw new CloudRuntimeException(anotherMEssage, e);
+            }
+        }
+    }
+
+    private Answer[] attemptHypervisorMigration(StoragePool destPool, VMInstanceVO vm) {
+        final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
+        // OfflineVmwareMigration: in case of vmware call vcenter to do it for us.
+        // OfflineVmwareMigration: should we check the proximity of source and destination
+        // OfflineVmwareMigration: if we are in the same cluster/datacentre/pool or whatever?
+        // OfflineVmwareMigration: we are checking on success to optionally delete an old vm if we are not
+        List<Command> commandsToSend = hvGuru.finalizeMigrate(vm, destPool);
+
+        Long hostId = vm.getHostId();
+        // OfflineVmwareMigration: probably this is null when vm is stopped
+        if(hostId == null) {
+            hostId = vm.getLastHostId();
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug(String.format("host id is null, using last host id %d", hostId) );
+            }
+        }
+
+        if(CollectionUtils.isNotEmpty(commandsToSend)) {
+            Commands commandsContainer = new Commands(Command.OnError.Stop);
+            commandsContainer.addCommands(commandsToSend);
+            try {
+                // OfflineVmwareMigration: change to the call back variety?
+                // OfflineVmwareMigration: getting a Long seq to be filled with _agentMgr.send(hostId, commandsContainer, this)
+                return _agentMgr.send(hostId, commandsContainer);
+            } catch (AgentUnavailableException | OperationTimedoutException e) {
+                throw new CloudRuntimeException(String.format("Failed to migrate VM: %s", vm.getUuid()),e);
+            }
+        }
+        return null;
+    }
+
+    private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException {
+        boolean isDebugEnabled = s_logger.isDebugEnabled();
+        if(isDebugEnabled) {
+            String msg = String.format("cleaning up after hypervisor pool migration volumes for VM %s(%s) to pool %s(%s)", vm.getInstanceName(), vm.getUuid(), destPool.getName(), destPool.getUuid());
+            s_logger.debug(msg);
+        }
+        setDestinationPoolAndReallocateNetwork(destPool, vm);
+        // OfflineVmwareMigration: don't set this to null or have another way to address the command; twice migrating will lead to an NPE
+        Long destPodId = destPool.getPodId();
+        Long vmPodId = vm.getPodIdToDeployIn();
+        if (destPodId == null || ! destPodId.equals(vmPodId)) {
+            if(isDebugEnabled) {
+                String msg = String.format("resetting lasHost for VM %s(%s) as pod (%s) is no good.", vm.getInstanceName(), vm.getUuid(), destPodId);
+                s_logger.debug(msg);
+            }
+
+            vm.setLastHostId(null);
+            vm.setPodIdToDeployIn(destPodId);
+            // OfflineVmwareMigration: a consecutive migration will fail probably (no host not pod)
+        }// else keep last host set for this vm
+        markVolumesInPool(vm,destPool, hypervisorMigrationResults);
+        // OfflineVmwareMigration: deal with answers, if (hypervisorMigrationResults.length > 0)
+        // OfflineVmwareMigration: iterate over the volumes for data updates
+    }
+
+    private void markVolumesInPool(VMInstanceVO vm, StoragePool destPool, Answer[] hypervisorMigrationResults) {
+        MigrateVmToPoolAnswer relevantAnswer = null;
+        for (Answer answer : hypervisorMigrationResults) {
+            if (s_logger.isTraceEnabled()) {
+                s_logger.trace(String.format("received an %s: %s", answer.getClass().getSimpleName(), answer));
+            }
+            if (answer instanceof MigrateVmToPoolAnswer) {
+                relevantAnswer = (MigrateVmToPoolAnswer) answer;
+            }
+        }
+        if (relevantAnswer == null) {
+            throw new CloudRuntimeException("no relevant migration results found");
+        }
+        List<VolumeVO> volumes = _volsDao.findUsableVolumesForInstance(vm.getId());
+        if(s_logger.isDebugEnabled()) {
+            String msg = String.format("found %d volumes for VM %s(uuid:%s, id:%d)", volumes.size(), vm.getInstanceName(), vm.getUuid(), vm.getId());
+            s_logger.debug(msg);
+        }
+        for (VolumeObjectTO result : relevantAnswer.getVolumeTos() ) {
+            if(s_logger.isDebugEnabled()) {
+                s_logger.debug(String.format("updating volume (%d) with path '%s' on pool '%d'", result.getId(), result.getPath(), destPool.getId()));
+            }
+            VolumeVO volume = _volsDao.findById(result.getId());
+            volume.setPath(result.getPath());
+            volume.setPoolId(destPool.getId());
+            _volsDao.update(volume.getId(), volume);
+        }
+    }
+
+    private void migrateThroughHypervisorOrStorage(StoragePool destPool, VMInstanceVO vm) throws StorageUnavailableException, InsufficientCapacityException {
+        final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
+        final Long srchostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
+        final HostVO srcHost = _hostDao.findById(srchostId);
+        final Long srcClusterId = srcHost.getClusterId();
+        Answer[] hypervisorMigrationResults = attemptHypervisorMigration(destPool, vm);
+        boolean migrationResult = false;
+        if (hypervisorMigrationResults == null) {
+            // OfflineVmwareMigration: if the HypervisorGuru can't do it, let the volume manager take care of it.
+            migrationResult = volumeMgr.storageMigration(profile, destPool);
+            if (migrationResult) {
+                afterStorageMigrationCleanup(destPool, vm, srcHost, srcClusterId);
+            } else {
+                s_logger.debug("Storage migration failed");
+            }
+        } else {
+            afterHypervisorMigrationCleanup(destPool, vm, srcHost, srcClusterId, hypervisorMigrationResults);
+        }
+    }
+
+    private void preStorageMigrationStateCheck(StoragePool destPool, VMInstanceVO vm) {
         if (destPool == null) {
             throw new CloudRuntimeException("Unable to migrate vm: missing destination storage pool");
         }
+
+        checkDestinationForTags(destPool, vm);
         try {
-            stateTransitTo(vm, VirtualMachine.Event.StorageMigrationRequested, null);
+            stateTransitTo(vm, Event.StorageMigrationRequested, null);
         } catch (final NoTransitionException e) {
-            s_logger.debug("Unable to migrate vm: " + e.toString());
-            throw new CloudRuntimeException("Unable to migrate vm: " + e.toString());
+            String msg = String.format("Unable to migrate vm: %s", vm.getUuid());
+            s_logger.debug(msg);
+            throw new CloudRuntimeException(msg, e);
         }
+    }
 
-        final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
-        boolean migrationResult = false;
+    private void checkDestinationForTags(StoragePool destPool, VMInstanceVO vm) {
+        List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
+        // OfflineVmwareMigration: iterate over volumes
+        // OfflineVmwareMigration: get disk offering
+        List<String> storageTags = storageMgr.getStoragePoolTagList(destPool.getId());
+        for(Volume vol : vols) {
+            DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId());
+            List<String> volumeTags = StringUtils.csvTagsToList(diskOffering.getTags());
+            if(! matches(volumeTags, storageTags)) {
+                String msg = String.format("destination pool '%s' with tags '%s', does not support the volume diskoffering for volume '%s' (tags: '%s') ",
+                        destPool.getName(),
+                        StringUtils.listToCsvTags(storageTags),
+                        vol.getName(),
+                        StringUtils.listToCsvTags(volumeTags)
+                );
+                throw new CloudRuntimeException(msg);
+            }
+        }
+    }
+
+    static boolean matches(List<String> volumeTags, List<String> storagePoolTags) {
+        // OfflineVmwareMigration: commons collections 4 allows for Collections.containsAll(volumeTags,storagePoolTags);
+        boolean result = true;
+        if (volumeTags != null) {
+            for (String tag : volumeTags) {
+                // there is a volume tags so
+                if (storagePoolTags == null || !storagePoolTags.contains(tag)) {
+                    result = false;
+                    break;
+                }
+            }
+        }
+        return result;
+    }
+
+    private void afterStorageMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException {
+        setDestinationPoolAndReallocateNetwork(destPool, vm);
+
+        //when start the vm next time, don;'t look at last_host_id, only choose the host based on volume/storage pool
+        vm.setLastHostId(null);
+        vm.setPodIdToDeployIn(destPool.getPodId());
+
+        // If VM was cold migrated between clusters belonging to two different VMware DCs,
+        // unregister the VM from the source host and cleanup the associated VM files.
+        if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
+            afterStorageMigrationVmwareVMcleanup(destPool, vm, srcHost, srcClusterId);
+        }
+    }
+
+    private void setDestinationPoolAndReallocateNetwork(StoragePool destPool, VMInstanceVO vm) throws InsufficientCapacityException {
+        //if the vm is migrated to different pod in basic mode, need to reallocate ip
+
+        if (destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) {
+            if (s_logger.isDebugEnabled()) {
+                String msg = String.format("as the pod for vm %s has changed we are reallocating its network", vm.getInstanceName());
+                s_logger.debug(msg);
+            }
+            final DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null);
+            final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null);
+            _networkMgr.reallocate(vmProfile, plan);
+        }
+    }
+
+    private void afterStorageMigrationVmwareVMcleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
+        // OfflineVmwareMigration: this should only happen on storage migration, else the guru would already have issued the command
+        final Long destClusterId = destPool.getClusterId();
+        if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) {
+            final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
+            final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
+            if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
+                removeStaleVmFromSource(vm, srcHost);
+            }
+        }
+    }
+
+    // OfflineVmwareMigration: on port forward refator this to be done in two
+    // OfflineVmwareMigration: command creation in the guru.migrat method
+    // OfflineVmwareMigration: sending up in the attemptHypevisorMigration with execute in sequence (responsibility of the guru)
+    private void removeStaleVmFromSource(VMInstanceVO vm, HostVO srcHost) {
+        s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() +
+                " from source host: " + srcHost.getId());
+        final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName());
+        uvc.setCleanupVmFiles(true);
         try {
-            migrationResult = volumeMgr.storageMigration(profile, destPool);
-
-            if (migrationResult) {
-                //if the vm is migrated to different pod in basic mode, need to reallocate ip
-
-                if (destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) {
-                    final DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null);
-                    final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null);
-                    _networkMgr.reallocate(vmProfile, plan);
-                }
-
-                //when start the vm next time, don;'t look at last_host_id, only choose the host based on volume/storage pool
-                vm.setLastHostId(null);
-                vm.setPodIdToDeployIn(destPool.getPodId());
-
-                // If VM was cold migrated between clusters belonging to two different VMware DCs,
-                // unregister the VM from the source host and cleanup the associated VM files.
-                if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
-                    Long srcClusterId = null;
-                    Long srcHostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
-                    if (srcHostId != null) {
-                        HostVO srcHost = _hostDao.findById(srcHostId);
-                        srcClusterId = srcHost.getClusterId();
-                    }
-
-                    final Long destClusterId = destPool.getClusterId();
-                    if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) {
-                        final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
-                        final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
-                        if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
-                            s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() +
-                                    " from source host: " + srcHostId);
-                            final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName());
-                            uvc.setCleanupVmFiles(true);
-                            try {
-                                _agentMgr.send(srcHostId, uvc);
-                            } catch (final AgentUnavailableException | OperationTimedoutException e) {
-                                throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHostId +
-                                        " after successfully migrating VM's storage across VMware Datacenters");
-                            }
-                        }
-                    }
-                }
-
-            } else {
-                s_logger.debug("Storage migration failed");
-            }
-        } catch (final ConcurrentOperationException e) {
-            s_logger.debug("Failed to migration: " + e.toString());
-            throw new CloudRuntimeException("Failed to migration: " + e.toString());
-        } catch (final InsufficientVirtualNetworkCapacityException e) {
-            s_logger.debug("Failed to migration: " + e.toString());
-            throw new CloudRuntimeException("Failed to migration: " + e.toString());
-        } catch (final InsufficientAddressCapacityException e) {
-            s_logger.debug("Failed to migration: " + e.toString());
-            throw new CloudRuntimeException("Failed to migration: " + e.toString());
-        } catch (final InsufficientCapacityException e) {
-            s_logger.debug("Failed to migration: " + e.toString());
-            throw new CloudRuntimeException("Failed to migration: " + e.toString());
-        } catch (final StorageUnavailableException e) {
-            s_logger.debug("Failed to migration: " + e.toString());
-            throw new CloudRuntimeException("Failed to migration: " + e.toString());
-        } finally {
-            try {
-                stateTransitTo(vm, VirtualMachine.Event.AgentReportStopped, null);
-            } catch (final NoTransitionException e) {
-                s_logger.debug("Failed to change vm state: " + e.toString());
-                throw new CloudRuntimeException("Failed to change vm state: " + e.toString());
-            }
+            _agentMgr.send(srcHost.getId(), uvc);
+        } catch (final Exception e) {
+            throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() +
+                    " after successfully migrating VM's storage across VMware Datacenters");
         }
     }
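The package-visible static matches() above makes the new tag check easy to pin down: every tag on the volume's disk offering must be present on the destination pool, and a volume without tags matches any pool. A test-style sketch of its subset semantics:

    // Subset semantics of VirtualMachineManagerImpl.matches(volumeTags, storagePoolTags).
    assert matches(null, null);                                          // no volume tags: anything goes
    assert matches(Arrays.asList("ssd"), Arrays.asList("ssd", "fast"));  // subset of pool tags
    assert !matches(Arrays.asList("ssd", "fast"), Arrays.asList("ssd")); // "fast" missing on the pool
    assert !matches(Arrays.asList("ssd"), null);                         // tagged volume, untagged pool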
@@ -4577,6 +4720,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager
         final User user = context.getCallingUser();
         final Account account = context.getCallingAccount();
 
+        Map<Volume, StoragePool> volumeStorageMap = dest.getStorageForDisks();
+        if (volumeStorageMap != null) {
+            for (Volume vol : volumeStorageMap.keySet()) {
+                checkConcurrentJobsPerDatastoreThreshhold(volumeStorageMap.get(vol));
+            }
+        }
+
         final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
 
         final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs(
@@ -4738,6 +4888,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager
         return new VmJobVirtualMachineOutcome(workJob, vm.getId());
     }
 
+    private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPool) {
+        final Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
+        if (threshold != null && threshold > 0) {
+            long count = _jobMgr.countPendingJobs("\"storageid\":\"" + destPool.getUuid() + "\"", MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName());
+            if (count > threshold) {
+                throw new CloudRuntimeException("Number of concurrent migration jobs per datastore exceeded the threshold: " + threshold.toString() + ". Please try again after some time.");
+            }
+        }
+    }
+
     public Outcome<VirtualMachine> migrateVmStorageThroughJobQueue(
             final String vmUuid, final StoragePool destPool) {
 
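Note how the throttle counts pending jobs: it searches the queued jobs' serialized command info for the destination pool's uuid, restricted to the three migration command classes. The search key has to reproduce the exact JSON fragment, quotes included:

    // Matches e.g. {"storageid":"6a0c3611-...",...} inside pending
    // MigrateVMCmd, MigrateVolumeCmd and MigrateVolumeCmdByAdmin jobs.
    String searchKey = "\"storageid\":\"" + destPool.getUuid() + "\"";
    long pending = _jobMgr.countPendingJobs(searchKey,
            MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName());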
VolumeOrchestrator.java:

@@ -30,6 +30,10 @@ import java.util.concurrent.ExecutionException;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
+import com.cloud.storage.VolumeApiService;
+import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
+import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin;
+import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
 import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
 import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@ -953,10 +957,29 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPool) {
|
||||||
|
final Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
|
||||||
|
if (threshold != null && threshold > 0) {
|
||||||
|
long count = _jobMgr.countPendingJobs("\"storageid\":\"" + destPool.getUuid() + "\"", MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName());
|
||||||
|
if (count > threshold) {
|
||||||
|
throw new CloudRuntimeException("Number of concurrent migration jobs per datastore exceeded the threshold: " + threshold.toString() + ". Please try again after some time.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@DB
|
@DB
|
||||||
public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
|
public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
|
||||||
VolumeInfo vol = volFactory.getVolume(volume.getId());
|
VolumeInfo vol = volFactory.getVolume(volume.getId());
|
||||||
|
if (vol == null){
|
||||||
|
throw new CloudRuntimeException("Migrate volume failed because volume object of volume " + volume.getName()+ "is null");
|
||||||
|
}
|
||||||
|
if (destPool == null) {
|
||||||
|
throw new CloudRuntimeException("Migrate volume failed because destination storage pool is not available!!");
|
||||||
|
}
|
||||||
|
|
||||||
|
checkConcurrentJobsPerDatastoreThreshhold(destPool);
|
||||||
|
|
||||||
DataStore dataStoreTarget = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary);
|
DataStore dataStoreTarget = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary);
|
||||||
AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(vol, dataStoreTarget);
|
AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(vol, dataStoreTarget);
|
||||||
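For context: a minimal sketch of how a caller might exercise the hardened migrateVolume(), assuming a VolumeOrchestrationService reference named volumeMgr and an existing s_logger; the variable names are illustrative only.

    // Hypothetical caller; migrateVolume() now validates its inputs and enforces
    // the per-datastore threshold before copying.
    try {
        Volume migrated = volumeMgr.migrateVolume(volume, destPool);
        s_logger.info("volume " + migrated.getUuid() + " now resides on pool " + destPool.getName());
    } catch (StorageUnavailableException e) {
        s_logger.warn("destination pool unavailable: " + e.getMessage());
    } catch (CloudRuntimeException e) {
        // e.g. a null volume/pool, or the concurrent-migration threshold was exceeded
        s_logger.warn("migration refused: " + e.getMessage());
    }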
@@ -1062,6 +1085,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             return true;
         }
 
+        // OfflineVmwareMigration: in case we can (VMware?), don't iterate over the volumes but tell the hypervisor to do the whole migration
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Offline VM migration was not done up the stack in VirtualMachineManager, so trying here.");
+        }
         for (Volume vol : volumesNeedToMigrate) {
            Volume result = migrateVolume(vol, destPool);
            if (result == null) {
@@ -17,6 +17,7 @@
 
 package com.cloud.vm;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyLong;
@@ -25,6 +26,7 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.when;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -178,7 +180,7 @@ public class VirtualMachineManagerImplTest {
 
         boolean actual = virtualMachineManagerImpl.sendStop(guru, profile, false, false);
 
-        Assert.assertFalse(actual);
+        assertFalse(actual);
     }
 
     @Test
@@ -192,7 +194,7 @@ public class VirtualMachineManagerImplTest {
 
         boolean actual = virtualMachineManagerImpl.sendStop(guru, profile, false, false);
 
-        Assert.assertFalse(actual);
+        assertFalse(actual);
     }
 
     @Test
@@ -242,7 +244,7 @@ public class VirtualMachineManagerImplTest {
 
         boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock);
 
-        Assert.assertFalse(returnedValue);
+        assertFalse(returnedValue);
     }
 
     @Test
@@ -253,7 +255,7 @@ public class VirtualMachineManagerImplTest {
 
         boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock);
 
-        Assert.assertFalse(returnedValue);
+        assertFalse(returnedValue);
     }
 
     @Test
@@ -317,7 +319,7 @@ public class VirtualMachineManagerImplTest {
 
         Map<Volume, StoragePool> volumeToPoolObjectMap = virtualMachineManagerImpl.buildMapUsingUserInformation(virtualMachineProfileMock, hostMock, userDefinedVolumeToStoragePoolMap);
 
-        Assert.assertFalse(volumeToPoolObjectMap.isEmpty());
+        assertFalse(volumeToPoolObjectMap.isEmpty());
         Assert.assertEquals(storagePoolVoMock, volumeToPoolObjectMap.get(volumeVoMock));
 
         Mockito.verify(userDefinedVolumeToStoragePoolMap, times(1)).keySet();
@@ -501,7 +503,7 @@ public class VirtualMachineManagerImplTest {
         HashMap<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
         virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock);
 
-        Assert.assertFalse(volumeToPoolObjectMap.isEmpty());
+        assertFalse(volumeToPoolObjectMap.isEmpty());
         Assert.assertEquals(storagePoolMockOther, volumeToPoolObjectMap.get(volumeVoMock));
     }
 
@@ -558,7 +560,7 @@ public class VirtualMachineManagerImplTest {
 
         virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, allVolumes);
 
-        Assert.assertFalse(volumeToPoolObjectMap.isEmpty());
+        assertFalse(volumeToPoolObjectMap.isEmpty());
         Assert.assertEquals(storagePoolVoMock, volumeToPoolObjectMap.get(volumeVoMock));
 
         Mockito.verify(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock);
@@ -587,4 +589,38 @@ public class VirtualMachineManagerImplTest {
         inOrder.verify(virtualMachineManagerImpl).findVolumesThatWereNotMappedByTheUser(virtualMachineProfileMock, volumeToPoolObjectMap);
         inOrder.verify(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumesNotMapped);
     }
+
+    @Test
+    public void matchesOfSorts() {
+        List<String> nothing = null;
+        List<String> empty = new ArrayList<>();
+        List<String> tag = Arrays.asList("bla");
+        List<String> tags = Arrays.asList("bla", "blob");
+        List<String> others = Arrays.asList("bla", "blieb");
+        List<String> three = Arrays.asList("bla", "blob", "blieb");
+
+        // single match
+        assertTrue(VirtualMachineManagerImpl.matches(tag, tags));
+        assertTrue(VirtualMachineManagerImpl.matches(tag, others));
+
+        // no requirements
+        assertTrue(VirtualMachineManagerImpl.matches(nothing, tags));
+        assertTrue(VirtualMachineManagerImpl.matches(empty, tag));
+
+        // mis(sing)match
+        assertFalse(VirtualMachineManagerImpl.matches(tags, tag));
+        assertFalse(VirtualMachineManagerImpl.matches(tag, nothing));
+        assertFalse(VirtualMachineManagerImpl.matches(tag, empty));
+
+        // disjunct sets
+        assertFalse(VirtualMachineManagerImpl.matches(tags, others));
+        assertFalse(VirtualMachineManagerImpl.matches(others, tags));
+
+        // everything matches the larger set
+        assertTrue(VirtualMachineManagerImpl.matches(nothing, three));
+        assertTrue(VirtualMachineManagerImpl.matches(empty, three));
+        assertTrue(VirtualMachineManagerImpl.matches(tag, three));
+        assertTrue(VirtualMachineManagerImpl.matches(tags, three));
+        assertTrue(VirtualMachineManagerImpl.matches(others, three));
+    }
 }
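For context: VirtualMachineManagerImpl.matches() itself is not part of this diff. A minimal implementation consistent with the assertions above would be the following; this is a reconstruction, not the committed code.

    // Reads as: a null or empty requirement list matches anything; otherwise
    // every required tag must be present among the available tags.
    static boolean matches(List<String> required, List<String> available) {
        if (required == null || required.isEmpty()) {
            return true;
        }
        return available != null && available.containsAll(required);
    }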
@@ -18,12 +18,17 @@
  */
 package org.apache.cloudstack.storage.motion;
 
+import java.util.Date;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
 import javax.inject.Inject;
 
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@@ -40,10 +45,15 @@ import com.cloud.host.Host;
 import com.cloud.utils.StringUtils;
 import com.cloud.utils.exception.CloudRuntimeException;
 
 @Component
 public class DataMotionServiceImpl implements DataMotionService {
+    private static final Logger LOGGER = Logger.getLogger(DataMotionServiceImpl.class);
+
     @Inject
     StorageStrategyFactory storageStrategyFactory;
+    @Inject
+    VolumeDao volDao;
 
     @Override
     public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
@@ -61,13 +71,33 @@ public class DataMotionServiceImpl implements DataMotionService {
 
         DataMotionStrategy strategy = storageStrategyFactory.getDataMotionStrategy(srcData, destData);
         if (strategy == null) {
+            // Offline VMware volume migration:
+            // clean up the volumes on the target and reset the state of the volume at the source
+            cleanUpVolumesForFailedMigrations(srcData, destData);
             throw new CloudRuntimeException("Can't find strategy to move data. " + "Source: " + srcData.getType().name() + " '" + srcData.getUuid() + ", Destination: " +
                     destData.getType().name() + " '" + destData.getUuid() + "'");
         }
 
         strategy.copyAsync(srcData, destData, destHost, callback);
     }
 
+    /**
+     * Offline VMware volume migration:
+     * clean up volumes after a failed migration and reset the state of the source volume.
+     *
+     * @param srcData the source volume
+     * @param destData the destination volume
+     */
+    private void cleanUpVolumesForFailedMigrations(DataObject srcData, DataObject destData) {
+        VolumeVO destinationVO = volDao.findById(destData.getId());
+        VolumeVO sourceVO = volDao.findById(srcData.getId());
+        sourceVO.setState(Volume.State.Ready);
+        volDao.update(sourceVO.getId(), sourceVO);
+        destinationVO.setState(Volume.State.Expunged);
+        destinationVO.setRemoved(new Date());
+        volDao.update(destinationVO.getId(), destinationVO);
+    }
+
     @Override
     public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
         copyAsync(srcData, destData, null, callback);
@@ -84,7 +114,7 @@ public class DataMotionServiceImpl implements DataMotionService {
             }
 
             throw new CloudRuntimeException("Can't find strategy to move data. " + "Source Host: " + srcHost.getName() + ", Destination Host: " + destHost.getName() +
                     ", Volume UUIDs: " + StringUtils.join(volumeIds, ","));
         }
 
         strategy.copyAsync(volumeMap, vmTo, srcHost, destHost, callback);
@@ -1408,6 +1408,19 @@ public class VolumeServiceImpl implements VolumeService {
 
     @Override
     public AsyncCallFuture<VolumeApiResult> copyVolume(VolumeInfo srcVolume, DataStore destStore) {
+        if (s_logger.isDebugEnabled()) {
+            DataStore srcStore = srcVolume.getDataStore();
+            String srcRole = (srcStore != null && srcStore.getRole() != null ? srcVolume.getDataStore().getRole().toString() : "<unknown role>");
+
+            String msg = String.format("copying %s(id=%d, role=%s) to %s (id=%d, role=%s)"
+                    , srcVolume.getName()
+                    , srcVolume.getId()
+                    , srcRole
+                    , destStore.getName()
+                    , destStore.getId()
+                    , destStore.getRole());
+            s_logger.debug(msg);
+        }
+
         if (srcVolume.getState() == Volume.State.Uploaded) {
             return copyVolumeFromImageToPrimary(srcVolume, destStore);
@@ -1417,6 +1430,8 @@ public class VolumeServiceImpl implements VolumeService {
             return copyVolumeFromPrimaryToImage(srcVolume, destStore);
         }
 
+        // OfflineVmwareMigration: aren't we missing secondary to secondary in this logic?
+
         AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
         VolumeApiResult res = new VolumeApiResult(srcVolume);
         try {
@@ -1438,7 +1453,10 @@ public class VolumeServiceImpl implements VolumeService {
             caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)).setContext(context);
             motionSrv.copyAsync(srcVolume, destVolume, caller);
         } catch (Exception e) {
-            s_logger.debug("Failed to copy volume" + e);
+            s_logger.error("Failed to copy volume: " + e);
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Failed to copy volume.", e);
+            }
             res.setResult(e.toString());
             future.complete(res);
         }
@@ -1461,27 +1479,25 @@ public class VolumeServiceImpl implements VolumeService {
                 AsyncCallFuture<VolumeApiResult> destroyFuture = expungeVolumeAsync(destVolume);
                 destroyFuture.get();
                 future.complete(res);
-                return null;
-            }
-            srcVolume.processEvent(Event.OperationSuccessed);
-            destVolume.processEvent(Event.MigrationCopySucceeded, result.getAnswer());
-            volDao.updateUuid(srcVolume.getId(), destVolume.getId());
-            _volumeStoreDao.updateVolumeId(srcVolume.getId(), destVolume.getId());
-            try {
-                destroyVolume(srcVolume.getId());
-                srcVolume = volFactory.getVolume(srcVolume.getId());
-                AsyncCallFuture<VolumeApiResult> destroyFuture = expungeVolumeAsync(srcVolume);
-                // If volume destroy fails, this could be because the vdi is still in use, so wait and retry.
-                if (destroyFuture.get().isFailed()) {
-                    Thread.sleep(5 * 1000);
-                    destroyFuture = expungeVolumeAsync(srcVolume);
-                    destroyFuture.get();
-                }
-                future.complete(res);
-            } catch (Exception e) {
-                s_logger.debug("failed to clean up volume on storage", e);
-            }
-            return null;
+            } else {
+                srcVolume.processEvent(Event.OperationSuccessed);
+                destVolume.processEvent(Event.MigrationCopySucceeded, result.getAnswer());
+                volDao.updateUuid(srcVolume.getId(), destVolume.getId());
+                try {
+                    destroyVolume(srcVolume.getId());
+                    srcVolume = volFactory.getVolume(srcVolume.getId());
+                    AsyncCallFuture<VolumeApiResult> destroyFuture = expungeVolumeAsync(srcVolume);
+                    // If volume destroy fails, this could be because the vdi is still in use, so wait and retry.
+                    if (destroyFuture.get().isFailed()) {
+                        Thread.sleep(5 * 1000);
+                        destroyFuture = expungeVolumeAsync(srcVolume);
+                        destroyFuture.get();
+                    }
+                    future.complete(res);
+                } catch (Exception e) {
+                    s_logger.debug("failed to clean up volume on storage", e);
+                }
+            }
         } catch (Exception e) {
             s_logger.debug("Failed to process copy volume callback", e);
             res.setResult(e.toString());
@@ -131,4 +131,6 @@ public interface AsyncJobManager extends Manager {
     Object unmarshallResultObject(AsyncJob job);
 
     List<AsyncJobVO> findFailureAsyncJobs(String... cmds);
+
+    long countPendingJobs(String havingInfo, String... cmds);
 }
@@ -44,4 +44,6 @@ public interface AsyncJobDao extends GenericDao<AsyncJobVO, Long> {
     List<AsyncJobVO> getResetJobs(long msid);
 
     List<AsyncJobVO> getFailureJobsSinceLastMsStart(long msId, String... cmds);
+
+    long countPendingJobs(String havingInfo, String... cmds);
 }
@@ -30,6 +30,7 @@ import org.apache.cloudstack.jobs.JobInfo;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Filter;
 import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.GenericSearchBuilder;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.SearchCriteria.Op;
@@ -46,6 +47,7 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
     private final SearchBuilder<AsyncJobVO> expiringUnfinishedAsyncJobSearch;
     private final SearchBuilder<AsyncJobVO> expiringCompletedAsyncJobSearch;
     private final SearchBuilder<AsyncJobVO> failureMsidAsyncJobSearch;
+    private final GenericSearchBuilder<AsyncJobVO, Long> asyncJobTypeSearch;
 
     public AsyncJobDaoImpl() {
         pendingAsyncJobSearch = createSearchBuilder();
@@ -94,6 +96,13 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
         failureMsidAsyncJobSearch.and("job_cmd", failureMsidAsyncJobSearch.entity().getCmd(), Op.IN);
         failureMsidAsyncJobSearch.done();
+
+        asyncJobTypeSearch = createSearchBuilder(Long.class);
+        asyncJobTypeSearch.select(null, SearchCriteria.Func.COUNT, asyncJobTypeSearch.entity().getId());
+        asyncJobTypeSearch.and("job_info", asyncJobTypeSearch.entity().getCmdInfo(), Op.LIKE);
+        asyncJobTypeSearch.and("job_cmd", asyncJobTypeSearch.entity().getCmd(), Op.IN);
+        asyncJobTypeSearch.and("status", asyncJobTypeSearch.entity().getStatus(), SearchCriteria.Op.EQ);
+        asyncJobTypeSearch.done();
     }
 
     @Override
@@ -227,4 +236,14 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
         sc.setParameters("job_cmd", (Object[])cmds);
         return listBy(sc);
     }
+
+    @Override
+    public long countPendingJobs(String havingInfo, String... cmds) {
+        SearchCriteria<Long> sc = asyncJobTypeSearch.create();
+        sc.setParameters("status", JobInfo.Status.IN_PROGRESS);
+        sc.setParameters("job_cmd", (Object[])cmds);
+        sc.setParameters("job_info", "%" + havingInfo + "%");
+        List<Long> results = customSearch(sc, null);
+        return results.get(0);
+    }
 }
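For context: for readers unfamiliar with CloudStack's GenericSearchBuilder, the count search above translates roughly to the SQL sketched in the comment below; the exact column names come from the AsyncJobVO mapping and are an assumption here.

    // Typical call from the migration code:
    long pending = _jobDao.countPendingJobs("\"storageid\":\"" + poolUuid + "\"",
            MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName());

    // Approximate query issued (assumed column names):
    //   SELECT COUNT(id) FROM async_job
    //    WHERE job_status = 'IN_PROGRESS'
    //      AND job_cmd IN (<cmds>)
    //      AND job_cmd_info LIKE '%"storageid":"<uuid>"%'
    // i.e. only in-progress migration jobs whose serialized parameters mention
    // the destination pool are counted.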
@@ -1122,4 +1122,9 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
     public List<AsyncJobVO> findFailureAsyncJobs(String... cmds) {
         return _jobDao.getFailureJobsSinceLastMsStart(getMsid(), cmds);
     }
+
+    @Override
+    public long countPendingJobs(String havingInfo, String... cmds) {
+        return _jobDao.countPendingJobs(havingInfo, cmds);
+    }
 }
@@ -26,6 +26,11 @@ import java.util.UUID;
 
 import javax.inject.Inject;
 
+import com.cloud.agent.api.MigrateVmToPoolCommand;
+import com.cloud.agent.api.UnregisterVMCommand;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.storage.StoragePool;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@@ -115,12 +120,14 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
     @Inject
     private GuestOSDao _guestOsDao;
     @Inject
-    GuestOSHypervisorDao _guestOsHypervisorDao;
+    private GuestOSHypervisorDao _guestOsHypervisorDao;
     @Inject
     private HostDao _hostDao;
     @Inject
     private HostDetailsDao _hostDetailsDao;
     @Inject
+    private ClusterDetailsDao _clusterDetailsDao;
+    @Inject
     private CommandExecLogDao _cmdExecLogDao;
     @Inject
     private VmwareManager _vmwareMgr;
@@ -640,4 +647,35 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
         details.put(VmwareReserveMemory.key(), VmwareReserveMemory.valueIn(clusterId).toString());
         return details;
     }
+
+    @Override
+    public List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination) {
+        List<Command> commands = new ArrayList<Command>();
+
+        // OfflineVmwareMigration: specialised migration command
+        List<VolumeVO> volumes = _volumeDao.findByInstance(vm.getId());
+        List<VolumeTO> vols = new ArrayList<>();
+        for (Volume volume : volumes) {
+            VolumeTO vol = new VolumeTO(volume, destination);
+            vols.add(vol);
+        }
+        MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, destination.getUuid(), true);
+        commands.add(migrateVmToPoolCommand);
+
+        // OfflineVmwareMigration: cleanup if needed
+        final Long destClusterId = destination.getClusterId();
+        final Long srcClusterId = getClusterId(vm.getId());
+
+        if (srcClusterId != null && destClusterId != null && !srcClusterId.equals(destClusterId)) {
+            final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
+            final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
+            if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
+                final UnregisterVMCommand unregisterVMCommand = new UnregisterVMCommand(vm.getInstanceName(), true);
+                unregisterVMCommand.setCleanupVmFiles(true);
+
+                commands.add(unregisterVMCommand);
+            }
+        }
+        return commands;
+    }
 }
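For context: a sketch of how the management server could consume finalizeMigrate(); 'agentMgr' and 'hostId' are placeholders for this illustration, not the exact orchestration code in this patch.

    // Sends the guru-built commands one by one and fails fast on a bad answer.
    // agentMgr.send() is the assumed AgentManager API and may throw agent
    // availability or timeout exceptions not handled in this sketch.
    List<Command> cmds = hypervisorGuru.finalizeMigrate(vm, destinationPool);
    for (Command cmd : cmds) {
        Answer answer = agentMgr.send(hostId, cmd);
        if (answer == null || !answer.getResult()) {
            throw new CloudRuntimeException("Offline migration failed at " + cmd.getClass().getSimpleName());
        }
    }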
@@ -43,8 +43,8 @@ import java.util.UUID;
 
 import javax.naming.ConfigurationException;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.math.NumberUtils;
+import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 import org.apache.log4j.NDC;
 import org.joda.time.Duration;
@@ -163,6 +163,8 @@ import com.cloud.agent.api.ManageSnapshotAnswer;
 import com.cloud.agent.api.ManageSnapshotCommand;
 import com.cloud.agent.api.MigrateAnswer;
 import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.MigrateVmToPoolAnswer;
+import com.cloud.agent.api.MigrateVmToPoolCommand;
 import com.cloud.agent.api.MigrateWithStorageAnswer;
 import com.cloud.agent.api.MigrateWithStorageCommand;
 import com.cloud.agent.api.ModifySshKeysCommand;
@@ -311,6 +313,7 @@ import com.cloud.vm.VmDetailConstants;
 
 public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer {
     private static final Logger s_logger = Logger.getLogger(VmwareResource.class);
+    public static final String VMDK_EXTENSION = ".vmdk";
 
     private static final Random RANDOM = new Random(System.nanoTime());
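For context: several hunks below swap the literal ".vmdk" for this constant at appendFileType() call sites. The helper itself is unchanged by the patch; its assumed semantics are simply:

    // Assumed behavior of appendFileType(path, VMDK_EXTENSION): append the
    // extension only when the path does not already carry it.
    static String appendFileType(String path, String fileType) {
        if (path == null || path.toLowerCase().endsWith(fileType.toLowerCase())) {
            return path;
        }
        return path + fileType;
    }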
@@ -442,6 +445,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             answer = execute((PrepareForMigrationCommand)cmd);
         } else if (clz == MigrateCommand.class) {
             answer = execute((MigrateCommand)cmd);
+        } else if (clz == MigrateVmToPoolCommand.class) {
+            answer = execute((MigrateVmToPoolCommand)cmd);
         } else if (clz == MigrateWithStorageCommand.class) {
             answer = execute((MigrateWithStorageCommand)cmd);
         } else if (clz == MigrateVolumeCommand.class) {
@@ -699,30 +704,38 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
 
         if (vmName.equalsIgnoreCase("none")) {
+            // OfflineVmwareMigration: we need to refactor the worker VM creation out for use in the migration methods as well as here
+            // OfflineVmwareMigration: this method is 100 lines and needs refactoring anyway
             // we need to spawn a worker VM to attach the volume to and resize the volume.
             useWorkerVm = true;
             vmName = getWorkerName(getServiceContext(), cmd, 0);
 
             String poolId = cmd.getPoolUuid();
 
+            // OfflineVmwareMigration: refactor for re-use
+            // OfflineVmwareMigration: 1. find data(store)
             ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
             DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
 
             s_logger.info("Create worker VM " + vmName);
 
+            // OfflineVmwareMigration: 2. create the worker with access to the data(store)
             vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName);
 
             if (vmMo == null) {
+                // OfflineVmwareMigration: don't throw a general Exception but think of a specific one
                 throw new Exception("Unable to create a worker VM for volume resize");
             }
 
             synchronized (this) {
-                vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + ".vmdk");
+                // OfflineVmwareMigration: 3. attach the disk to the worker
+                vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + VMDK_EXTENSION);
+
                 vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS);
             }
         }
 
+        // OfflineVmwareMigration: 4. find the (worker-) VM
         // find VM through datacenter (VM is not at the target host yet)
         vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
 
@@ -734,6 +747,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             throw new Exception(msg);
         }
 
+        // OfflineVmwareMigration: 5. ignore/replace the rest of the try-block; it is the functional bit
         Pair<VirtualDisk, String> vdisk = vmMo.getDiskDevice(path);
 
         if (vdisk == null) {
@@ -813,6 +827,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
             return new ResizeVolumeAnswer(cmd, false, error);
         } finally {
+            // OfflineVmwareMigration: 6. check if a worker was used and destroy it if needed
            try {
                 if (useWorkerVm) {
                     s_logger.info("Destroy worker VM after volume resize");
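For context: the numbered OfflineVmwareMigration comments added above mark out a reusable worker-VM pattern that the new volume-migration code later in this commit repeats. Condensed, under the assumption that the helper names match the surrounding hunks:

    // 1. find the datastore backing the pool
    ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
    DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
    // 2. create a worker VM with access to that datastore
    VirtualMachineMO worker = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerName);
    // 3. attach the target disk to the worker
    worker.attachDisk(new String[] { VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + VMDK_EXTENSION) }, morDS);
    // 4. re-find the VM through the datacenter
    worker = hyperHost.findVmOnPeerHyperHost(workerName);
    // 5. perform the actual operation (resize here; relocate in the migration code)
    // 6. in a finally block: detach the disks and destroy the worker VM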
@@ -2313,7 +2328,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
     }
 
     private void resizeRootDiskOnVMStart(VirtualMachineMO vmMo, DiskTO rootDiskTO, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
-        final Pair<VirtualDisk, String> vdisk = getVirtualDiskInfo(vmMo, appendFileType(rootDiskTO.getPath(), ".vmdk"));
+        final Pair<VirtualDisk, String> vdisk = getVirtualDiskInfo(vmMo, appendFileType(rootDiskTO.getPath(), VMDK_EXTENSION));
         assert (vdisk != null);
 
         Long reqSize = 0L;
@@ -2536,7 +2551,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                     vmdkPath = dsMo.getName();
                 }
 
-                datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + ".vmdk");
+                datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + VMDK_EXTENSION);
             }
         } else {
             datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getPath(), VmwareManager.s_vmwareSearchExcludeFolder.value());
@@ -3061,7 +3076,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
      * Ex. "[-iqn.2010-01.com.solidfire:4nhe.vol-1.27-0] i-2-18-VM/ROOT-18.vmdk" should return "i-2-18-VM/ROOT-18"
      */
     public String getVmdkPath(String path) {
-        if (!com.cloud.utils.StringUtils.isNotBlank(path)) {
+        if (!StringUtils.isNotBlank(path)) {
             return null;
         }
 
@@ -3075,7 +3090,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
         path = path.substring(startIndex + search.length());
 
-        final String search2 = ".vmdk";
+        final String search2 = VMDK_EXTENSION;
 
         int endIndex = path.indexOf(search2);
 
@@ -3128,10 +3143,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         final String datastoreVolumePath;
 
         if (vmdkPath != null) {
-            datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + ".vmdk");
+            datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + VMDK_EXTENSION);
         }
         else {
-            datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk");
+            datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + VMDK_EXTENSION);
         }
 
         volumeTO.setPath(datastoreVolumePath);
@@ -3780,12 +3795,172 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 invalidateServiceContext();
             }
 
-            String msg = "Unexcpeted exception " + VmwareHelper.getExceptionMessage(e);
+            String msg = "Unexpected exception " + VmwareHelper.getExceptionMessage(e);
             s_logger.error(msg, e);
             return new PrepareForMigrationAnswer(cmd, msg);
         }
     }
 
+    protected Answer execute(MigrateVmToPoolCommand cmd) {
+        if (s_logger.isInfoEnabled()) {
+            s_logger.info(String.format("executing MigrateVmToPoolCommand %s -> %s", cmd.getVmName(), cmd.getDestinationPool()));
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("MigrateVmToPoolCommand: " + _gson.toJson(cmd));
+            }
+        }
+
+        final String vmName = cmd.getVmName();
+
+        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
+        try {
+            VirtualMachineMO vmMo = getVirtualMachineMO(vmName, hyperHost);
+            if (vmMo == null) {
+                String msg = "VM " + vmName + " does not exist in VMware datacenter";
+                s_logger.error(msg);
+                throw new CloudRuntimeException(msg);
+            }
+
+            String poolUuid = cmd.getDestinationPool();
+            return migrateAndAnswer(vmMo, poolUuid, hyperHost, cmd);
+        } catch (Throwable e) { // hopefully only CloudRuntimeException :/
+            if (e instanceof Exception) {
+                return new Answer(cmd, (Exception) e);
+            }
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("problem", e);
+            }
+            s_logger.error(e.getLocalizedMessage());
+            return new Answer(cmd, false, "unknown problem: " + e.getLocalizedMessage());
+        }
+    }
+
+    private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHypervisorHost hyperHost, Command cmd) throws Exception {
+        ManagedObjectReference morDs = getTargetDatastoreMOReference(poolUuid, hyperHost);
+
+        try {
+            // OfflineVmwareMigration: getVolumesFromCommand(cmd);
+            Map<Integer, Long> volumeDeviceKey = getVolumesFromCommand(vmMo, cmd);
+            if (s_logger.isTraceEnabled()) {
+                for (Integer diskId : volumeDeviceKey.keySet()) {
+                    s_logger.trace(String.format("disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId)));
+                }
+            }
+            if (vmMo.changeDatastore(morDs)) {
+                // OfflineVmwareMigration: create target specification to include in answer
+                // Consolidate VM disks after successful VM migration.
+                // In case of a linked clone VM, if the VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies.
+                if (!vmMo.consolidateVmDisks()) {
+                    s_logger.warn("VM disk consolidation failed after storage migration. Yet proceeding with VM migration.");
+                } else {
+                    s_logger.debug("Successfully consolidated disks of VM " + vmMo.getVmName() + ".");
+                }
+                return createAnswerForCmd(vmMo, poolUuid, cmd, volumeDeviceKey);
+            } else {
+                return new Answer(cmd, false, "failed to change datastore for VM " + vmMo.getVmName());
+            }
+        } catch (Exception e) {
+            String msg = "change datastore for VM " + vmMo.getVmName() + " failed";
+            s_logger.error(msg + ": " + e.getLocalizedMessage());
+            throw new CloudRuntimeException(msg, e);
+        }
+    }
+
+    Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, Command cmd, Map<Integer, Long> volumeDeviceKey) throws Exception {
+        List<VolumeObjectTO> volumeToList = new ArrayList<>();
+        VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
+        VirtualDisk[] disks = vmMo.getAllDiskDevice();
+        Answer answer;
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace(String.format("creating answer for %s", cmd.getClass().getSimpleName()));
+        }
+        if (cmd instanceof MigrateVolumeCommand) {
+            if (disks.length == 1) {
+                String volumePath = vmMo.getVmdkFileBaseName(disks[0]);
+                return new MigrateVolumeAnswer(cmd, true, null, volumePath);
+            }
+            throw new CloudRuntimeException("not expecting more than one disk after a migrate volume command");
+        } else if (cmd instanceof MigrateVmToPoolCommand) {
+            for (VirtualDisk disk : disks) {
+                VolumeObjectTO newVol = new VolumeObjectTO();
+                String newPath = vmMo.getVmdkFileBaseName(disk);
+                VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolUuid);
+                newVol.setId(volumeDeviceKey.get(disk.getKey()));
+                newVol.setPath(newPath);
+                newVol.setChainInfo(_gson.toJson(diskInfo));
+                volumeToList.add(newVol);
+            }
+            return new MigrateVmToPoolAnswer((MigrateVmToPoolCommand)cmd, volumeToList);
+        }
+        return new Answer(cmd, false, null);
+    }
+
+    private Map<Integer, Long> getVolumesFromCommand(VirtualMachineMO vmMo, Command cmd) throws Exception {
+        Map<Integer, Long> volumeDeviceKey = new HashMap<Integer, Long>();
+        if (cmd instanceof MigrateVmToPoolCommand) {
+            MigrateVmToPoolCommand mcmd = (MigrateVmToPoolCommand)cmd;
+            for (VolumeTO volume : mcmd.getVolumes()) {
+                addVolumeDiskmapping(vmMo, volumeDeviceKey, volume.getPath(), volume.getId());
+            }
+        } else if (cmd instanceof MigrateVolumeCommand) {
+            MigrateVolumeCommand mcmd = (MigrateVolumeCommand)cmd;
+            addVolumeDiskmapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId());
+        }
+        return volumeDeviceKey;
+    }
+
+    private void addVolumeDiskmapping(VirtualMachineMO vmMo, Map<Integer, Long> volumeDeviceKey, String volumePath, long volumeId) throws Exception {
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug(String.format("locating disk for volume (%d) using path %s", volumeId, volumePath));
+        }
+        Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, volumePath + VMDK_EXTENSION);
+        String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
+        if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
+            vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
+        }
+        int diskId = diskInfo.first().getKey();
+        volumeDeviceKey.put(diskId, volumeId);
+    }
+
+    private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool, VmwareHypervisorHost hyperHost) {
+        ManagedObjectReference morDs;
+        try {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug(String.format("finding datastore %s", destinationPool));
+            }
+            morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, destinationPool);
+        } catch (Exception e) {
+            String msg = "exception while finding data store " + destinationPool;
+            s_logger.error(msg);
+            throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
+        }
+        return morDs;
+    }
+
+    private ManagedObjectReference getDataCenterMOReference(String vmName, VmwareHypervisorHost hyperHost) {
+        ManagedObjectReference morDc;
+        try {
+            morDc = hyperHost.getHyperHostDatacenter();
+        } catch (Exception e) {
+            String msg = "exception while finding VMware datacenter to search for VM " + vmName;
+            s_logger.error(msg);
+            throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
+        }
+        return morDc;
+    }
+
+    private VirtualMachineMO getVirtualMachineMO(String vmName, VmwareHypervisorHost hyperHost) {
+        VirtualMachineMO vmMo = null;
+        try {
+            // find VM through datacenter (VM is not at the target host yet)
+            vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
+        } catch (Exception e) {
+            String msg = "exception while searching for VM " + vmName + " in VMware datacenter";
+            s_logger.error(msg);
+            throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
+        }
+        return vmMo;
+    }
+
     protected Answer execute(MigrateCommand cmd) {
         if (s_logger.isInfoEnabled()) {
             s_logger.info("Executing resource MigrateCommand: " + _gson.toJson(cmd));
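For context: createAnswerForCmd() above ships the new VMDK base names and disk-chain info back to the management server. A sketch of the receiving side, assuming a MigrateVmToPoolAnswer accessor named getVolumeTos() and a VolumeDao named volDao (both assumptions for illustration):

    // Apply the per-volume results of an offline VM storage migration.
    MigrateVmToPoolAnswer answer = (MigrateVmToPoolAnswer) rawAnswer;
    for (VolumeObjectTO to : answer.getVolumeTos()) {
        VolumeVO volume = volDao.findById(to.getId());
        volume.setPath(to.getPath());           // new VMDK base name on the target pool
        volume.setChainInfo(to.getChainInfo()); // refreshed chain info (matters for linked clones)
        volDao.update(volume.getId(), volume);
    }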
@@ -3946,7 +4121,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 }
                 diskLocator = new VirtualMachineRelocateSpecDiskLocator();
                 diskLocator.setDatastore(morDsAtSource);
-                Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volume.getPath(), ".vmdk"));
+                Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volume.getPath(), VMDK_EXTENSION));
                 String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
                 if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
                     vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
@@ -4074,6 +4249,141 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
     }
 
+    private Answer migrateVolume(MigrateVolumeCommand cmd) {
+        Answer answer = null;
+        String path = cmd.getVolumePath();
+
+        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
+        VirtualMachineMO vmMo = null;
+        DatastoreMO dsMo = null;
+        ManagedObjectReference morSourceDS = null;
+        String vmdkDataStorePath = null;
+
+        String vmName = null;
+        try {
+            // OfflineVmwareMigration: we need to refactor the worker VM creation out for use in the migration methods as well as here
+            // OfflineVmwareMigration: this method is 100 lines and needs refactoring anyway
+            // we need to spawn a worker VM to attach the volume to and move it
+            vmName = getWorkerName(getServiceContext(), cmd, 0);
+
+            // OfflineVmwareMigration: refactor for re-use
+            // OfflineVmwareMigration: 1. find data(store)
+            // OfflineVmwareMigration: more robust would be to find the store given the volume, as it might have been moved out of band or due to an error
+            // example: DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
+
+            morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid());
+            dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
+            s_logger.info("Create worker VM " + vmName);
+            // OfflineVmwareMigration: 2. create the worker with access to the data(store)
+            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName);
+            if (vmMo == null) {
+                // OfflineVmwareMigration: don't throw a general Exception but think of a specific one
+                throw new CloudRuntimeException("Unable to create a worker VM for volume operation");
+            }
+
+            synchronized (this) {
+                // OfflineVmwareMigration: 3. attach the disk to the worker
+                String vmdkFileName = path + VMDK_EXTENSION;
+                vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName);
+                if (!dsMo.fileExists(vmdkDataStorePath)) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, path));
+                    }
+                    vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, path, vmdkFileName);
+                }
+                if (!dsMo.fileExists(vmdkDataStorePath)) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, vmName));
+                    }
+                    vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkFileName);
+                }
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug(String.format("attaching %s to %s for migration", vmdkDataStorePath, vmMo.getVmName()));
+                }
+                vmMo.attachDisk(new String[] { vmdkDataStorePath }, morSourceDS);
+            }
+
+            // OfflineVmwareMigration: 4. find the (worker-) VM
+            // find VM through datacenter (VM is not at the target host yet)
+            vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
+            if (vmMo == null) {
+                String msg = "VM " + vmName + " does not exist in VMware datacenter";
+                s_logger.error(msg);
+                throw new Exception(msg);
+            }
+
+            if (s_logger.isTraceEnabled()) {
+                VirtualDisk[] disks = vmMo.getAllDiskDevice();
+                String format = "disk %d is attached as %s";
+                for (VirtualDisk disk : disks) {
+                    s_logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk)));
+                }
+            }
+
+            // OfflineVmwareMigration: 5. create a relocate spec and perform
+            Pair<VirtualDisk, String> vdisk = vmMo.getDiskDevice(path);
+            if (vdisk == null) {
+                if (s_logger.isTraceEnabled())
+                    s_logger.trace("migrate volume done (failed)");
+                throw new CloudRuntimeException("No such disk device: " + path);
+            }
+
+            VirtualDisk disk = vdisk.first();
+            String vmdkAbsFile = getAbsoluteVmdkFile(disk);
+            if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
+                vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
+            }
+
+            // OfflineVmwareMigration: this may have to be dissected and executed in separate steps
+            answer = migrateAndAnswer(vmMo, cmd.getTargetPool().getUuid(), hyperHost, cmd);
+        } catch (Exception e) {
+            String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage());
+            s_logger.error(msg, e);
+            answer = new Answer(cmd, false, msg);
+        } finally {
+            try {
+                // OfflineVmwareMigration: worker *may* have been renamed
+                vmName = vmMo.getVmName();
+                morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getTargetPool().getUuid());
+                dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
+                s_logger.info("Detaching disks before destroying worker VM '" + vmName + "' after volume migration");
+                VirtualDisk[] disks = vmMo.getAllDiskDevice();
+                String format = "disk %d was migrated to %s";
+                for (VirtualDisk disk : disks) {
+                    if (s_logger.isTraceEnabled()) {
+                        s_logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk)));
+                    }
+                    vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmMo.getVmdkFileBaseName(disk) + VMDK_EXTENSION);
+                    vmMo.detachDisk(vmdkDataStorePath, false);
+                }
+                s_logger.info("Destroy worker VM '" + vmName + "' after volume migration");
+                vmMo.destroy();
+            } catch (Throwable e) {
+                s_logger.info("Failed to destroy worker VM: " + vmName);
+            }
+        }
+        if (answer instanceof MigrateVolumeAnswer) {
+            String newPath = ((MigrateVolumeAnswer)answer).getVolumePath();
+            String vmdkFileName = newPath + VMDK_EXTENSION;
+            try {
+                VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, newPath, vmName);
+                vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName);
+
+                if (!dsMo.fileExists(vmdkDataStorePath)) {
+                    String msg = String.format("Migration of volume '%s' failed; file (%s) not found as path '%s'", cmd.getVolumePath(), vmdkFileName, vmdkDataStorePath);
+                    s_logger.error(msg);
+                    answer = new Answer(cmd, false, msg);
+                }
+            } catch (Exception e) {
+                String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage());
+                s_logger.error(msg, e);
+                answer = new Answer(cmd, false, msg);
+            }
+        }
+        return answer;
+    }
+
+    // OfflineVmwareMigration: refactor to be able to handle a detached volume
     private Answer execute(MigrateVolumeCommand cmd) {
         String volumePath = cmd.getVolumePath();
         StorageFilerTO poolTo = cmd.getPool();
@@ -4087,6 +4397,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         VirtualMachineMO vmMo = null;
         VmwareHypervisorHost srcHyperHost = null;
 
+        // OfflineVmwareMigration: what if host is null?
+        if (org.apache.commons.lang.StringUtils.isBlank(cmd.getAttachedVmName())) {
+            return migrateVolume(cmd);
+        }
         ManagedObjectReference morDs = null;
         ManagedObjectReference morDc = null;
         VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
@@ -4107,7 +4421,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             if (vmMo == null) {
                 String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue();
                 s_logger.error(msg);
-                throw new Exception(msg);
+                throw new CloudRuntimeException(msg);
             }
             vmName = vmMo.getName();
             morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName);
@@ -4119,8 +4433,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             }
 
             DatastoreMO targetDsMo = new DatastoreMO(srcHyperHost.getContext(), morDs);
-            String fullVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(targetDsMo, vmName, volumePath + ".vmdk");
+            String fullVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(targetDsMo, vmName, volumePath + VMDK_EXTENSION);
-            Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volumePath, ".vmdk"));
|
Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volumePath, VMDK_EXTENSION));
|
||||||
String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
|
String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
|
||||||
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
|
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
|
||||||
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
|
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
|
||||||
|
|||||||
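The migrateVolume path above follows a worker-VM pattern: attach the detached volume to a temporary worker VM, relocate it, then always detach the disks and destroy the worker in the finally block, even when the relocation failed. A minimal, self-contained sketch of that control flow; the Worker interface is a stand-in for CloudStack's VirtualMachineMO, not a real API:

import java.util.ArrayList;
import java.util.List;

public class WorkerVmPattern {
    interface Worker {
        void attach(String disk);
        void migrate(String pool);
        void detachAll();
        void destroy();
    }

    static void migrateDetachedVolume(Worker worker, String disk, String targetPool) {
        try {
            worker.attach(disk);
            worker.migrate(targetPool);
        } finally {
            // best-effort cleanup, mirroring the finally block in the diff above
            try {
                worker.detachAll();
                worker.destroy();
            } catch (RuntimeException e) {
                System.err.println("Failed to destroy worker VM: " + e.getMessage());
            }
        }
    }

    public static void main(String[] args) {
        List<String> log = new ArrayList<>();
        Worker w = new Worker() {
            public void attach(String d) { log.add("attach " + d); }
            public void migrate(String p) { log.add("migrate to " + p); }
            public void detachAll() { log.add("detach all"); }
            public void destroy() { log.add("destroy worker"); }
        };
        migrateDetachedVolume(w, "vol-1.vmdk", "pool-2");
        System.out.println(log); // [attach vol-1.vmdk, migrate to pool-2, detach all, destroy worker]
    }
}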
@@ -20,11 +20,39 @@
 package org.apache.cloudstack.storage.motion;

 import java.util.ArrayList;
+import java.util.Date;
 import java.util.List;
 import java.util.Map;

 import javax.inject.Inject;

+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.MigrateWithStorageAnswer;
+import com.cloud.agent.api.MigrateWithStorageCommand;
+import com.cloud.agent.api.storage.MigrateVolumeAnswer;
+import com.cloud.agent.api.storage.MigrateVolumeCommand;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.exception.AgentUnavailableException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.host.Host;
+import com.cloud.host.HostVO;
+import com.cloud.host.Status;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@@ -38,25 +66,6 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;

-import com.cloud.agent.AgentManager;
-import com.cloud.agent.api.Answer;
-import com.cloud.agent.api.MigrateWithStorageAnswer;
-import com.cloud.agent.api.MigrateWithStorageCommand;
-import com.cloud.agent.api.to.StorageFilerTO;
-import com.cloud.agent.api.to.VirtualMachineTO;
-import com.cloud.agent.api.to.VolumeTO;
-import com.cloud.exception.AgentUnavailableException;
-import com.cloud.exception.OperationTimedoutException;
-import com.cloud.host.Host;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.VolumeVO;
-import com.cloud.storage.dao.VolumeDao;
-import com.cloud.utils.Pair;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.vm.VMInstanceVO;
-import com.cloud.vm.dao.VMInstanceDao;
-
 @Component
 public class VmwareStorageMotionStrategy implements DataMotionStrategy {
     private static final Logger s_logger = Logger.getLogger(VmwareStorageMotionStrategy.class);
@@ -70,12 +79,77 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
     PrimaryDataStoreDao storagePoolDao;
     @Inject
     VMInstanceDao instanceDao;
+    @Inject
+    private HostDao hostDao;

     @Override
     public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
+        // OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same vmware-cluster and both are volumes
+        if (isOnVmware(srcData, destData)
+                && isOnPrimary(srcData, destData)
+                && isVolumesOnly(srcData, destData)
+                && isDettached(srcData)
+                && isIntraCluster(srcData, destData)
+                && isStoreScopeEqual(srcData, destData)) {
+            if (s_logger.isDebugEnabled()) {
+                String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the VMware cluster %s (== %s)"
+                        , this.getClass()
+                        , srcData.getId()
+                        , srcData.getUuid()
+                        , destData.getId()
+                        , destData.getUuid()
+                        , storagePoolDao.findById(srcData.getDataStore().getId()).getClusterId()
+                        , storagePoolDao.findById(destData.getDataStore().getId()).getClusterId());
+                s_logger.debug(msg);
+            }
+            return StrategyPriority.HYPERVISOR;
+        }
         return StrategyPriority.CANT_HANDLE;
     }

+    private boolean isDettached(DataObject srcData) {
+        VolumeVO volume = volDao.findById(srcData.getId());
+        return volume.getInstanceId() == null;
+    }
+
+    private boolean isVolumesOnly(DataObject srcData, DataObject destData) {
+        return DataObjectType.VOLUME.equals(srcData.getType())
+                && DataObjectType.VOLUME.equals(destData.getType());
+    }
+
+    private boolean isOnPrimary(DataObject srcData, DataObject destData) {
+        return DataStoreRole.Primary.equals(srcData.getDataStore().getRole())
+                && DataStoreRole.Primary.equals(destData.getDataStore().getRole());
+    }
+
+    private boolean isOnVmware(DataObject srcData, DataObject destData) {
+        return HypervisorType.VMware.equals(srcData.getTO().getHypervisorType())
+                && HypervisorType.VMware.equals(destData.getTO().getHypervisorType());
+    }
+
+    private boolean isIntraCluster(DataObject srcData, DataObject destData) {
+        DataStore srcStore = srcData.getDataStore();
+        StoragePool srcPool = storagePoolDao.findById(srcStore.getId());
+        DataStore destStore = destData.getDataStore();
+        StoragePool destPool = storagePoolDao.findById(destStore.getId());
+        return srcPool.getClusterId().equals(destPool.getClusterId());
+    }
+
+    /**
+     * Ensure that the scope of source and destination storage pools match
+     *
+     * @param srcData the source volume
+     * @param destData the destination volume
+     * @return true when both pools have the same scope type
+     */
+    private boolean isStoreScopeEqual(DataObject srcData, DataObject destData) {
+        DataStore srcStore = srcData.getDataStore();
+        DataStore destStore = destData.getDataStore();
+        String msg = String.format("Storage scope of source pool is %s and of destination pool is %s", srcStore.getScope().toString(), destStore.getScope().toString());
+        s_logger.debug(msg);
+        return srcStore.getScope().getScopeType() == destStore.getScope().getScopeType();
+    }
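The capability check above is a conjunction of six predicates: VMware on both ends, both volumes on primary storage, the source volume detached, both pools in the same cluster, and matching pool scopes. A simplified, self-contained model of that decision (plain records stand in for CloudStack's DataObject and StoragePool types, so the names are illustrative only):

import java.util.Objects;

public class CanHandleSketch {
    enum Scope { CLUSTER, ZONE }
    record Vol(String hypervisor, String role, String type, Long attachedVmId, Long clusterId, Scope scope) {}

    static boolean canHandle(Vol src, Vol dest) {
        return "VMware".equals(src.hypervisor()) && "VMware".equals(dest.hypervisor()) // isOnVmware
                && "Primary".equals(src.role()) && "Primary".equals(dest.role())       // isOnPrimary
                && "VOLUME".equals(src.type()) && "VOLUME".equals(dest.type())         // isVolumesOnly
                && src.attachedVmId() == null                                          // isDettached
                && Objects.equals(src.clusterId(), dest.clusterId())                   // isIntraCluster
                && src.scope() == dest.scope();                                        // isStoreScopeEqual
    }

    public static void main(String[] args) {
        Vol src = new Vol("VMware", "Primary", "VOLUME", null, 1L, Scope.CLUSTER);
        Vol dest = new Vol("VMware", "Primary", "VOLUME", null, 1L, Scope.CLUSTER);
        System.out.println(canHandle(src, dest)); // true -> StrategyPriority.HYPERVISOR
    }
}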
     @Override
     public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
         if (srcHost.getHypervisorType() == HypervisorType.VMware && destHost.getHypervisorType() == HypervisorType.VMware) {
@@ -85,9 +159,96 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
         return StrategyPriority.CANT_HANDLE;
     }

+    /**
+     * the Vmware storageMotion strategy allows to copy to a destination pool but not to a destination host
+     *
+     * @param srcData volume to move
+     * @param destData volume description as intended after the move
+     * @param destHost null or else
+     * @param callback where to report completion or failure to
+     */
     @Override
     public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
-        throw new UnsupportedOperationException();
+        if (destHost != null) {
+            String format = "%s cannot target a host in moving an object from {%s}\n to {%s}";
+            String msg = String.format(format
+                    , this.getClass().getName()
+                    , srcData.toString()
+                    , destData.toString()
+            );
+            s_logger.error(msg);
+            throw new CloudRuntimeException(msg);
+        }
+        // OfflineVmwareMigration: extract the destination pool from destData and construct a migrateVolume command
+        if (!isOnPrimary(srcData, destData)) {
+            // OfflineVmwareMigration: we shouldn't be here as we would have refused in the canHandle call
+            throw new UnsupportedOperationException();
+        }
+        StoragePool sourcePool = (StoragePool) srcData.getDataStore();
+        StoragePool targetPool = (StoragePool) destData.getDataStore();
+        MigrateVolumeCommand cmd = new MigrateVolumeCommand(srcData.getId()
+                , srcData.getTO().getPath()
+                , sourcePool
+                , targetPool);
+        // OfflineVmwareMigration: should be ((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding
+        Answer answer;
+        ScopeType scopeType = srcData.getDataStore().getScope().getScopeType();
+        if (ScopeType.CLUSTER == scopeType) {
+            // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
+            Long hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
+            if (hostId == null) {
+                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in cluster: " + sourcePool.getName());
+            }
+            answer = agentMgr.easySend(hostId, cmd);
+        } else {
+            answer = agentMgr.sendTo(sourcePool.getDataCenterId(), HypervisorType.VMware, cmd);
+        }
+        updateVolumeAfterMigration(answer, srcData, destData);
+        CopyCommandResult result = new CopyCommandResult(null, answer);
+        callback.complete(result);
+    }
+
+    /**
+     * Selects a host from the cluster housing the source storage pool.
+     * Assumption is that Primary Storage is cluster-wide.
+     * <p>
+     * Returns any host ID within the cluster if the storage pool is cluster-wide; null otherwise.
+     *
+     * @param clusterId the cluster to pick a host from
+     * @return the id of a suitable host, or null when none is found
+     */
+    private Long findSuitableHostIdForWorkerVmPlacement(Long clusterId) {
+        List<HostVO> hostLists = hostDao.findByClusterId(clusterId);
+        Long hostId = null;
+        for (HostVO hostVO : hostLists) {
+            if (hostVO.getHypervisorType().equals(HypervisorType.VMware) && hostVO.getStatus() == Status.Up) {
+                hostId = hostVO.getId();
+                break;
+            }
+        }
+        return hostId;
+    }
+
+    private void updateVolumeAfterMigration(Answer answer, DataObject srcData, DataObject destData) {
+        VolumeVO destinationVO = volDao.findById(destData.getId());
+        if (!(answer instanceof MigrateVolumeAnswer)) {
+            // OfflineVmwareMigration: reset states and such
+            VolumeVO sourceVO = volDao.findById(srcData.getId());
+            sourceVO.setState(Volume.State.Ready);
+            volDao.update(sourceVO.getId(), sourceVO);
+            destinationVO.setState(Volume.State.Expunged);
+            destinationVO.setRemoved(new Date());
+            volDao.update(destinationVO.getId(), destinationVO);
+            throw new CloudRuntimeException("unexpected answer from hypervisor agent: " + answer.getDetails());
+        }
+        MigrateVolumeAnswer ans = (MigrateVolumeAnswer) answer;
+        if (s_logger.isDebugEnabled()) {
+            String format = "retrieved '%s' as new path for volume(%d)";
+            s_logger.debug(String.format(format, ans.getVolumePath(), destData.getId()));
+        }
+        // OfflineVmwareMigration: update the volume with new pool/volume path
+        destinationVO.setPath(ans.getVolumePath());
+        volDao.update(destinationVO.getId(), destinationVO);
     }
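Worker-VM placement above picks the first VMware host in the source pool's cluster that is Up. A JDK-only sketch of the same selection; HostVO and HostDao are CloudStack classes, so a record stands in here:

import java.util.List;
import java.util.Optional;

public class WorkerHostPicker {
    record Host(long id, String hypervisor, String status) {}

    // Pick the first VMware host that is Up, mirroring findSuitableHostIdForWorkerVmPlacement.
    static Optional<Long> pick(List<Host> clusterHosts) {
        return clusterHosts.stream()
                .filter(h -> "VMware".equals(h.hypervisor()) && "Up".equals(h.status()))
                .map(Host::id)
                .findFirst();
    }

    public static void main(String[] args) {
        List<Host> hosts = List.of(new Host(1, "KVM", "Up"), new Host(2, "VMware", "Up"));
        System.out.println(pick(hosts).orElseThrow()); // 2
    }
}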
     @Override
@@ -124,7 +285,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
         for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
             VolumeInfo volume = entry.getKey();
             VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
-            StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
+            StorageFilerTO filerTo = new StorageFilerTO((StoragePool) entry.getValue());
             volumeToFilerto.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
         }

@@ -133,7 +294,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
         // Run validations against target!!
         // 2. Complete the process. Update the volume details.
         MigrateWithStorageCommand migrateWithStorageCmd = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid());
-        MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer)agentMgr.send(srcHost.getId(), migrateWithStorageCmd);
+        MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), migrateWithStorageCmd);
         if (migrateWithStorageAnswer == null) {
             s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed.");
             throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
@@ -162,12 +323,12 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
         for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
             VolumeInfo volume = entry.getKey();
             VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
-            StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
+            StorageFilerTO filerTo = new StorageFilerTO((StoragePool) entry.getValue());
             volumeToFilerto.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
         }

         MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid());
-        MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer)agentMgr.send(srcHost.getId(), command);
+        MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), command);
         if (answer == null) {
             s_logger.error("Migration with storage of vm " + vm + " failed.");
             throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
@@ -190,7 +351,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
         for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
             boolean updated = false;
             VolumeInfo volume = entry.getKey();
-            StoragePool pool = (StoragePool)entry.getValue();
+            StoragePool pool = (StoragePool) entry.getValue();
             for (VolumeObjectTO volumeTo : volumeTos) {
                 if (volume.getId() == volumeTo.getId()) {
                     VolumeVO volumeVO = volDao.findById(volume.getId());
@@ -16,13 +16,6 @@
 // under the License.
 package org.apache.cloudstack.storage.motion;

-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.isA;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -30,6 +23,29 @@ import java.util.Map;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;

+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.MigrateWithStorageAnswer;
+import com.cloud.agent.api.MigrateWithStorageCommand;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.host.Host;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCallFuture;
+import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
+import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.test.utils.SpringUtils;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -47,29 +63,12 @@ import org.springframework.test.context.ContextConfiguration;
 import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
 import org.springframework.test.context.support.AnnotationConfigContextLoader;

-import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
-import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
-import org.apache.cloudstack.framework.async.AsyncCallFuture;
-import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
-import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcContext;
-import org.apache.cloudstack.storage.command.CommandResult;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.test.utils.SpringUtils;
-
-import com.cloud.agent.AgentManager;
-import com.cloud.agent.api.MigrateWithStorageAnswer;
-import com.cloud.agent.api.MigrateWithStorageCommand;
-import com.cloud.agent.api.to.VirtualMachineTO;
-import com.cloud.host.Host;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.storage.dao.VolumeDao;
-import com.cloud.utils.component.ComponentContext;
-import com.cloud.vm.VMInstanceVO;
-import com.cloud.vm.dao.VMInstanceDao;
-
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;

 @RunWith(SpringJUnit4ClassRunner.class)
 @ContextConfiguration(loader = AnnotationConfigContextLoader.class)
@@ -87,6 +86,8 @@ public class VmwareStorageMotionStrategyTest {
     PrimaryDataStoreDao storagePoolDao;
     @Inject
     VMInstanceDao instanceDao;
+    @Inject
+    private HostDao hostDao;

     CopyCommandResult result;

@@ -262,6 +263,11 @@ public class VmwareStorageMotionStrategyTest {
         return Mockito.mock(AgentManager.class);
     }

+    @Bean
+    public HostDao hostDao() {
+        return Mockito.mock(HostDao.class);
+    }
+
     public static class Library implements TypeFilter {
         @Override
         public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException {
@@ -49,6 +49,7 @@ public class ApiDispatcher {
     private static final Logger s_logger = Logger.getLogger(ApiDispatcher.class.getName());

     Long _createSnapshotQueueSizeLimit;
+    Long migrateQueueSizeLimit;

     @Inject
     AsyncJobManager _asyncMgr;
@@ -79,6 +80,9 @@ public class ApiDispatcher {
         _createSnapshotQueueSizeLimit = snapshotLimit;
     }

+    public void setMigrateQueueSizeLimit(final Long migrateLimit) {
+        migrateQueueSizeLimit = migrateLimit;
+    }
+
     public void dispatchCreateCmd(final BaseAsyncCreateCmd cmd, final Map<String, String> params) throws Exception {
         asyncCreationDispatchChain.dispatch(new DispatchTask(cmd, params));
@@ -123,7 +127,9 @@ public class ApiDispatcher {
         if (asyncCmd.getJob() != null && asyncCmd.getSyncObjId() != null && asyncCmd.getSyncObjType() != null) {
             Long queueSizeLimit = null;
             if (asyncCmd.getSyncObjType() != null && asyncCmd.getSyncObjType().equalsIgnoreCase(BaseAsyncCmd.snapshotHostSyncObject)) {
                 queueSizeLimit = _createSnapshotQueueSizeLimit;
+            } else if (asyncCmd.getSyncObjType() != null && asyncCmd.getSyncObjType().equalsIgnoreCase(BaseAsyncCmd.migrationSyncObject)) {
+                queueSizeLimit = migrateQueueSizeLimit;
             } else {
                 queueSizeLimit = 1L;
             }
@@ -148,6 +154,6 @@ public class ApiDispatcher {
         }

         cmd.execute();
     }

 }
@@ -19,7 +19,6 @@ package com.cloud.api;
 import com.cloud.api.dispatch.DispatchChainFactory;
 import com.cloud.api.dispatch.DispatchTask;
 import com.cloud.api.response.ApiResponseSerializer;
-import com.cloud.configuration.Config;
 import com.cloud.domain.Domain;
 import com.cloud.domain.DomainVO;
 import com.cloud.domain.dao.DomainDao;
@@ -35,6 +34,7 @@ import com.cloud.exception.RequestLimitException;
 import com.cloud.exception.ResourceAllocationException;
 import com.cloud.exception.ResourceUnavailableException;
 import com.cloud.exception.UnavailableCommandException;
+import com.cloud.storage.VolumeApiService;
 import com.cloud.user.Account;
 import com.cloud.user.AccountManager;
 import com.cloud.user.DomainManager;
@@ -44,7 +44,6 @@ import com.cloud.user.UserVO;
 import com.cloud.utils.ConstantTimeComparator;
 import com.cloud.utils.DateUtil;
 import com.cloud.utils.HttpUtils;
-import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.ReflectUtil;
 import com.cloud.utils.StringUtils;
@@ -54,7 +53,6 @@ import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.component.PluggableService;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.EntityManager;
-import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.TransactionLegacy;
 import com.cloud.utils.db.UUIDManager;
 import com.cloud.utils.exception.CloudRuntimeException;
@@ -100,8 +98,6 @@ import org.apache.cloudstack.config.ApiServiceConfiguration;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.framework.config.ConfigKey;
 import org.apache.cloudstack.framework.config.Configurable;
-import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
-import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
 import org.apache.cloudstack.framework.events.EventBus;
 import org.apache.cloudstack.framework.events.EventBusException;
 import org.apache.cloudstack.framework.jobs.AsyncJob;
@@ -209,8 +205,6 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
     @Inject
     private AsyncJobManager asyncMgr;
     @Inject
-    private ConfigurationDao configDao;
-    @Inject
     private EntityManager entityMgr;
     @Inject
     private APIAuthenticationManager authManager;
@@ -228,14 +222,60 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
     private static ExecutorService s_executor = new ThreadPoolExecutor(10, 150, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(
             "ApiServer"));

-    static final ConfigKey<Boolean> EnableSecureSessionCookie = new ConfigKey<Boolean>("Advanced", Boolean.class, "enable.secure.session.cookie", "false",
-            "Session cookie is marked as secure if this is enabled. Secure cookies only work when HTTPS is used.", false);
-
-    static final ConfigKey<String> JSONcontentType = new ConfigKey<String>(String.class, "json.content.type", "Advanced", "application/json; charset=UTF-8",
-            "Http response content type for .js files (default is text/javascript)", false, ConfigKey.Scope.Global, null);
     @Inject
     private MessageBus messageBus;

+    private static final ConfigKey<Integer> IntegrationAPIPort = new ConfigKey<Integer>("Advanced"
+            , Integer.class
+            , "integration.api.port"
+            , "8096"
+            , "Default API port"
+            , false
+            , ConfigKey.Scope.Global);
+    private static final ConfigKey<Long> ConcurrentSnapshotsThresholdPerHost = new ConfigKey<Long>("Advanced"
+            , Long.class
+            , "concurrent.snapshots.threshold.perhost"
+            , null
+            , "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited"
+            , true // not sure if this is to be dynamic
+            , ConfigKey.Scope.Global);
+    private static final ConfigKey<Boolean> EncodeApiResponse = new ConfigKey<Boolean>("Advanced"
+            , Boolean.class
+            , "encode.api.response"
+            , "false"
+            , "Do URL encoding for the api response, false by default"
+            , false
+            , ConfigKey.Scope.Global);
+    static final ConfigKey<String> JSONcontentType = new ConfigKey<String>("Advanced"
+            , String.class
+            , "json.content.type"
+            , "application/json; charset=UTF-8"
+            , "Http response content type for .js files (default is text/javascript)"
+            , false
+            , ConfigKey.Scope.Global);
+    static final ConfigKey<Boolean> EnableSecureSessionCookie = new ConfigKey<Boolean>("Advanced"
+            , Boolean.class
+            , "enable.secure.session.cookie"
+            , "false"
+            , "Session cookie is marked as secure if this is enabled. Secure cookies only work when HTTPS is used."
+            , false
+            , ConfigKey.Scope.Global);
+    private static final ConfigKey<String> JSONDefaultContentType = new ConfigKey<String>("Advanced"
+            , String.class
+            , "json.content.type"
+            , "application/json; charset=UTF-8"
+            , "Http response content type for JSON"
+            , false
+            , ConfigKey.Scope.Global);
+
+    private static final ConfigKey<Boolean> UseEventAccountInfo = new ConfigKey<Boolean>("advanced"
+            , Boolean.class
+            , "event.accountinfo"
+            , "false"
+            , "use account info in event logging"
+            , true
+            , ConfigKey.Scope.Global);
+
     @Override
     public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
         messageBus.subscribe(AsyncJob.Topics.JOB_EVENT_PUBLISH, MessageDispatcher.getDispatcher(this));
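The hunk above replaces ad-hoc ConfigurationDao lookups with typed ConfigKey constants that are registered through getConfigKeys() and read with value(). A JDK-only analogue of that typed-key idea, purely illustrative; CloudStack's real ConfigKey additionally carries the category, scope, and dynamism shown in the diff:

import java.util.Map;

public class TypedConfigSketch {
    record Key<T>(String name, T defaultValue, java.util.function.Function<String, T> parser) {
        T value(Map<String, String> store) {
            String raw = store.get(name);
            return raw == null ? defaultValue : parser.apply(raw);
        }
    }

    static final Key<Integer> INTEGRATION_API_PORT = new Key<>("integration.api.port", 8096, Integer::parseInt);

    public static void main(String[] args) {
        Map<String, String> store = Map.of("integration.api.port", "8097");
        System.out.println(INTEGRATION_API_PORT.value(store)); // 8097, typed and with a default
    }
}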
@@ -305,8 +345,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
             eventDescription.put("cmdInfo", job.getCmdInfo());
             eventDescription.put("status", "" + job.getStatus());
             // If the event.accountinfo boolean value is set, get the human readable value for the username / domainname
-            Map<String, String> configs = configDao.getConfiguration("management-server", new HashMap<String, String>());
-            if (Boolean.valueOf(configs.get("event.accountinfo"))) {
+            if (UseEventAccountInfo.value()) {
                 DomainVO domain = domainDao.findById(jobOwner.getDomainId());
                 eventDescription.put("username", userJobOwner.getUsername());
                 eventDescription.put("accountname", jobOwner.getAccountName());
@@ -325,27 +364,20 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
     @Override
     public boolean start() {
         Security.addProvider(new BouncyCastleProvider());
-        Integer apiPort = null; // api port, null by default
-        final SearchCriteria<ConfigurationVO> sc = configDao.createSearchCriteria();
-        sc.addAnd("name", SearchCriteria.Op.EQ, Config.IntegrationAPIPort.key());
-        final List<ConfigurationVO> values = configDao.search(sc, null);
-        if ((values != null) && (values.size() > 0)) {
-            final ConfigurationVO apiPortConfig = values.get(0);
-            if (apiPortConfig.getValue() != null) {
-                apiPort = Integer.parseInt(apiPortConfig.getValue());
-                apiPort = (apiPort <= 0) ? null : apiPort;
-            }
-        }
+        Integer apiPort = IntegrationAPIPort.value(); // api port, null by default
+
+        final Long snapshotLimit = ConcurrentSnapshotsThresholdPerHost.value();
+        if (snapshotLimit == null || snapshotLimit.longValue() <= 0) {
+            s_logger.debug("Global concurrent snapshot config parameter " + ConcurrentSnapshotsThresholdPerHost.value() + " is less or equal 0; defaulting to unlimited");
+        } else {
+            dispatcher.setCreateSnapshotQueueSizeLimit(snapshotLimit);
+        }

-        final Map<String, String> configs = configDao.getConfiguration();
-        final String strSnapshotLimit = configs.get(Config.ConcurrentSnapshotsThresholdPerHost.key());
-        if (strSnapshotLimit != null) {
-            final Long snapshotLimit = NumbersUtil.parseLong(strSnapshotLimit, 1L);
-            if (snapshotLimit.longValue() <= 0) {
-                s_logger.debug("Global config parameter " + Config.ConcurrentSnapshotsThresholdPerHost.toString() + " is less or equal 0; defaulting to unlimited");
-            } else {
-                dispatcher.setCreateSnapshotQueueSizeLimit(snapshotLimit);
-            }
-        }
+        final Long migrationLimit = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
+        if (migrationLimit == null || migrationLimit.longValue() <= 0) {
+            s_logger.debug("Global concurrent migration config parameter " + VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value() + " is less or equal 0; defaulting to unlimited");
+        } else {
+            dispatcher.setMigrateQueueSizeLimit(migrationLimit);
+        }

         final Set<Class<?>> cmdClasses = new HashSet<Class<?>>();
@@ -372,7 +404,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer

         }

-        setEncodeApiResponse(Boolean.valueOf(configDao.getValue(Config.EncodeApiResponse.key())));
+        setEncodeApiResponse(EncodeApiResponse.value());

         if (apiPort != null) {
             final ListenerThread listenerThread = new ListenerThread(this, apiPort);
@@ -1200,16 +1232,6 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
         }
     }

-    @Override
-    public String getConfigComponentName() {
-        return ApiServer.class.getSimpleName();
-    }
-
-    @Override
-    public ConfigKey<?>[] getConfigKeys() {
-        return new ConfigKey<?>[] { EnableSecureSessionCookie, JSONcontentType };
-    }
-
     // FIXME: the following two threads are copied from
     // http://svn.apache.org/repos/asf/httpcomponents/httpcore/trunk/httpcore/src/examples/org/apache/http/examples/ElementalHttpServer.java
     // we have to cite a license if we are using this code directly, so we need to add the appropriate citation or
@@ -1413,4 +1435,19 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
         ApiServer.encodeApiResponse = encodeApiResponse;
     }

+    @Override
+    public String getConfigComponentName() {
+        return ApiServer.class.getSimpleName();
+    }
+
+    @Override
+    public ConfigKey<?>[] getConfigKeys() {
+        return new ConfigKey<?>[] {
+                IntegrationAPIPort,
+                ConcurrentSnapshotsThresholdPerHost,
+                EncodeApiResponse,
+                EnableSecureSessionCookie,
+                JSONDefaultContentType
+        };
+    }
 }
@@ -566,7 +566,6 @@ public enum Config {
             "The interval (in milliseconds) when host stats are retrieved from agents.",
             null),
     HostRetry("Advanced", AgentManager.class, Integer.class, "host.retry", "2", "Number of times to retry hosts for creating a volume", null),
-    IntegrationAPIPort("Advanced", ManagementServer.class, Integer.class, "integration.api.port", null, "Default API port. To disable set it to 0 or negative.", null),
     InvestigateRetryInterval(
             "Advanced",
             HighAvailabilityManager.class,
@@ -1439,7 +1438,6 @@ public enum Config {
             "true",
             "Allow subdomains to use networks dedicated to their parent domain(s)",
             null),
-    EncodeApiResponse("Advanced", ManagementServer.class, Boolean.class, "encode.api.response", "false", "Do URL encoding for the api response, false by default", null),
     DnsBasicZoneUpdates(
             "Advanced",
             NetworkOrchestrationService.class,
@@ -1693,14 +1691,6 @@ public enum Config {
             null),
     VpcMaxNetworks("Advanced", ManagementServer.class, Integer.class, "vpc.max.networks", "3", "Maximum number of networks per vpc", null),
     DetailBatchQuerySize("Advanced", ManagementServer.class, Integer.class, "detail.batch.query.size", "2000", "Default entity detail batch query size for listing", null),
-    ConcurrentSnapshotsThresholdPerHost(
-            "Advanced",
-            ManagementServer.class,
-            Long.class,
-            "concurrent.snapshots.threshold.perhost",
-            null,
-            "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited",
-            null),
     NetworkIPv6SearchRetryMax(
             "Network",
             ManagementServer.class,
@@ -1342,7 +1342,8 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {

         // There should be atleast the ROOT volume of the VM in usable state
         if (volumesTobeCreated.isEmpty()) {
-            throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM");
+            // OfflineVmwareMigration: find out what is wrong with the id of the vm we try to start
+            throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM: " + vmProfile.getId());
         }

         // don't allow to start vm that doesn't have a root volume
@@ -40,6 +40,7 @@ import com.cloud.resource.ResourceManager;
 import com.cloud.service.ServiceOfferingDetailsVO;
 import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.service.dao.ServiceOfferingDetailsDao;
+import com.cloud.storage.StoragePool;
 import com.cloud.utils.Pair;
 import com.cloud.utils.component.AdapterBase;
 import com.cloud.vm.NicProfile;
@@ -225,4 +226,8 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
         return null;
     }

+    @Override
+    public List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination) {
+        return null;
+    }
 }
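The base class above answers the new finalizeMigrate hook with null, which callers treat as "this hypervisor cannot build offline-migration commands"; a hypervisor-specific guru overrides it to return the command list to send. A self-contained sketch of that null-means-unsupported contract, with illustrative names only:

import java.util.List;

public class GuruHookSketch {
    interface Guru { List<String> finalizeMigrate(String vm, String destPool); }

    static class BaseGuru implements Guru {
        public List<String> finalizeMigrate(String vm, String destPool) { return null; } // not supported
    }

    static class VmwareLikeGuru extends BaseGuru {
        @Override
        public List<String> finalizeMigrate(String vm, String destPool) {
            return List.of("MigrateVmToPool(" + vm + " -> " + destPool + ")"); // placeholder command
        }
    }

    public static void main(String[] args) {
        Guru base = new BaseGuru();
        Guru vmware = new VmwareLikeGuru();
        System.out.println(base.finalizeMigrate("i-2-3-VM", "pool-1"));   // null -> unsupported
        System.out.println(vmware.finalizeMigrate("i-2-3-VM", "pool-1")); // one command to dispatch
    }
}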
@@ -37,6 +37,7 @@ import javax.crypto.spec.SecretKeySpec;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;

+import com.cloud.storage.ScopeType;
 import org.apache.cloudstack.acl.ControlledEntity;
 import org.apache.cloudstack.affinity.AffinityGroupProcessor;
 import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
@@ -1103,6 +1104,32 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         return new Pair<List<? extends Cluster>, Integer>(result.first(), result.second());
     }

+    private HypervisorType getHypervisorType(VMInstanceVO vm, StoragePool srcVolumePool, VirtualMachineProfile profile) {
+        HypervisorType type = null;
+        if (vm == null) {
+            StoragePoolVO poolVo = _poolDao.findById(srcVolumePool.getId());
+            if (ScopeType.CLUSTER.equals(poolVo.getScope())) {
+                Long clusterId = poolVo.getClusterId();
+                if (clusterId != null) {
+                    ClusterVO cluster = _clusterDao.findById(clusterId);
+                    type = cluster.getHypervisorType();
+                }
+            } else if (ScopeType.ZONE.equals(poolVo.getScope())) {
+                Long zoneId = poolVo.getDataCenterId();
+                if (zoneId != null) {
+                    DataCenterVO dc = _dcDao.findById(zoneId);
+                }
+            }
+
+            if (null == type) {
+                type = srcVolumePool.getHypervisor();
+            }
+        } else {
+            type = profile.getHypervisorType();
+        }
+        return type;
+    }
+
     @Override
     public Pair<List<? extends Host>, Integer> searchForServers(final ListHostsCmd cmd) {

@@ -1433,10 +1460,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe

         DataCenterDeployment plan = new DataCenterDeployment(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null, null, null);
         VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
+        // OfflineVmwareMigration: vm might be null here; deal!
+        HypervisorType type = getHypervisorType(vm, srcVolumePool, profile);

         DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
         //This is an override mechanism so we can list the possible local storage pools that a volume in a shared pool might be able to be migrated to
-        DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
+        DiskProfile diskProfile = new DiskProfile(volume, diskOffering, type);
         diskProfile.setUseLocalStorage(true);

         for (StoragePoolAllocator allocator : _storagePoolAllocators) {
@@ -522,7 +522,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C

     @Override
     public String getStoragePoolTags(long poolId) {
-        return com.cloud.utils.StringUtils.listToCsvTags(_storagePoolDao.searchForStoragePoolTags(poolId));
+        return StringUtils.listToCsvTags(getStoragePoolTagList(poolId));
+    }
+
+    @Override
+    public List<String> getStoragePoolTagList(long poolId) {
+        return _storagePoolDao.searchForStoragePoolTags(poolId);
     }

     @Override
@ -56,6 +56,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
|
|||||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
|
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
|
||||||
import org.apache.cloudstack.framework.async.AsyncCallFuture;
|
import org.apache.cloudstack.framework.async.AsyncCallFuture;
|
||||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||||
|
import org.apache.cloudstack.framework.config.Configurable;
|
||||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||||
import org.apache.cloudstack.framework.jobs.AsyncJob;
|
import org.apache.cloudstack.framework.jobs.AsyncJob;
|
||||||
import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext;
|
import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext;
|
||||||
@ -178,7 +179,7 @@ import com.google.gson.Gson;
|
|||||||
import com.google.gson.GsonBuilder;
|
import com.google.gson.GsonBuilder;
|
||||||
import com.google.gson.JsonParseException;
|
import com.google.gson.JsonParseException;
|
||||||
|
|
||||||
public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService, VmWorkJobHandler {
|
public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService, VmWorkJobHandler, Configurable {
|
||||||
private final static Logger s_logger = Logger.getLogger(VolumeApiServiceImpl.class);
|
private final static Logger s_logger = Logger.getLogger(VolumeApiServiceImpl.class);
|
||||||
public static final String VM_WORK_JOB_HANDLER = VolumeApiServiceImpl.class.getSimpleName();
|
public static final String VM_WORK_JOB_HANDLER = VolumeApiServiceImpl.class.getSimpleName();
|
||||||
|
|
||||||
@ -2028,10 +2029,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         }

         // Check that Vm to which this volume is attached does not have VM Snapshots
+        // OfflineVmwareMigration: consider if this is needed and desirable
         if (vm != null && _vmSnapshotDao.findByVm(vm.getId()).size() > 0) {
             throw new InvalidParameterValueException("Volume cannot be migrated, please remove all VM snapshots for VM to which this volume is attached");
         }

+        // OfflineVmwareMigration: extract this block as method and check if it is subject to regression
         if (vm != null && vm.getState() == State.Running) {
             // Check if the VM is GPU enabled.
             if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
@ -2073,6 +2076,16 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             throw new CloudRuntimeException("Storage pool " + destPool.getName() + " does not have enough space to migrate volume " + vol.getName());
         }

+        // OfflineVmwareMigration: check storage tags on disk(offering)s in comparison to destination storage pool
+        // OfflineVmwareMigration: if no match return a proper error now
+        DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId());
+        if (diskOffering == null) {
+            throw new CloudRuntimeException("volume '" + vol.getUuid() + "' has no disk offering. Migration target cannot be checked.");
+        }
+        if (!doesTargetStorageSupportDiskOffering(destPool, diskOffering)) {
+            throw new CloudRuntimeException("Migration target has no matching tags for volume '" + vol.getName() + "(" + vol.getUuid() + ")'");
+        }
+
         if (liveMigrateVolume && destPool.getClusterId() != null && srcClusterId != null) {
             if (!srcClusterId.equals(destPool.getClusterId())) {
                 throw new InvalidParameterValueException("Cannot migrate a volume of a virtual machine to a storage pool in a different cluster");
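The new pre-check reuses doesTargetStorageSupportDiskOffering(), whose behavior (also exercised by the renamed unit tests further down) appears to be: every tag the disk offering demands must be present on the target pool, and an untagged offering matches any pool. A hedged sketch of that containment check, with hypothetical names; it is consistent with the test expectations below but is not the committed code:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class TagMatchSketch {
        // Returns true when the pool carries every tag the offering demands;
        // an offering without tags is satisfied by any pool.
        static boolean poolSupportsOfferingTags(String poolTagsCsv, String offeringTagsCsv) {
            if (offeringTagsCsv == null || offeringTagsCsv.trim().isEmpty()) {
                return true;
            }
            Set<String> poolTags = new HashSet<>();
            if (poolTagsCsv != null && !poolTagsCsv.trim().isEmpty()) {
                poolTags.addAll(Arrays.asList(poolTagsCsv.split(",")));
            }
            for (String wanted : offeringTagsCsv.split(",")) {
                if (!poolTags.contains(wanted)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            System.out.println(poolSupportsOfferingTags("A,B,C,D,X,Y", "A,B")); // true
            System.out.println(poolSupportsOfferingTags("C,D", "A"));           // false
        }
    }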
@ -2191,7 +2204,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         if ((destPool.isShared() && newDiskOffering.isUseLocalStorage()) || destPool.isLocal() && newDiskOffering.isShared()) {
             throw new InvalidParameterValueException("You cannot move the volume to a shared storage and assign a disk offering for local storage and vice versa.");
         }
-        if (!doesTargetStorageSupportNewDiskOffering(destPool, newDiskOffering)) {
+        if (!doesTargetStorageSupportDiskOffering(destPool, newDiskOffering)) {
             throw new InvalidParameterValueException(String.format("Target Storage [id=%s] tags [%s] does not match new disk offering [id=%s] tags [%s].", destPool.getUuid(),
                     getStoragePoolTags(destPool), newDiskOffering.getUuid(), newDiskOffering.getTags()));
         }
@ -2236,9 +2249,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
      * </body>
      * </table>
      */
-    protected boolean doesTargetStorageSupportNewDiskOffering(StoragePool destPool, DiskOfferingVO newDiskOffering) {
-        String newDiskOfferingTags = newDiskOffering.getTags();
-        return doesTargetStorageSupportDiskOffering(destPool, newDiskOfferingTags);
+    protected boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, DiskOfferingVO diskOffering) {
+        String targetStoreTags = diskOffering.getTags();
+        return doesTargetStorageSupportDiskOffering(destPool, targetStoreTags);
     }

     @Override
@ -3350,4 +3363,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         return workJob;
     }

+    @Override
+    public String getConfigComponentName() {
+        return VolumeApiService.class.getSimpleName();
+    }
+
+    @Override
+    public ConfigKey<?>[] getConfigKeys() {
+        return new ConfigKey<?>[] {ConcurrentMigrationsThresholdPerDatastore};
+    }
 }
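Implementing Configurable is what registers ConcurrentMigrationsThresholdPerDatastore with the configuration framework so it can be managed as a global setting. The key's declaration is not part of this excerpt; following the ConfigKey constructor pattern used elsewhere in the codebase, it plausibly looks like the sketch below (the key name, default, and description here are illustrative, not the committed values):

    import org.apache.cloudstack.framework.config.ConfigKey;

    class MigrationConfigSketch {
        // Hypothetical declaration; the committed name/default/description live where the key is defined.
        static final ConfigKey<Integer> ConcurrentMigrationsThresholdPerDatastore =
                new ConfigKey<Integer>("Advanced", Integer.class,
                        "concurrent.migrations.per.datastore",   // illustrative key name
                        "0",                                     // illustrative default
                        "Maximum concurrent storage migrations per datastore (0 = unrestricted)",
                        true);

        int currentThreshold() {
            return ConcurrentMigrationsThresholdPerDatastore.value(); // resolved by the config framework at runtime
        }
    }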
@ -5065,12 +5065,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
         }

         if (vm.getType() != VirtualMachine.Type.User) {
+            // OffLineVmwareMigration: *WHY* ?
             throw new InvalidParameterValueException("can only do storage migration on user vm");
         }

         List<VolumeVO> vols = _volsDao.findByInstance(vm.getId());
         if (vols.size() > 1) {
-            throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
+            // OffLineVmwareMigration: data disks are not permitted, here!
+            if (vols.size() > 1 &&
+                    // OffLineVmwareMigration: allow multiple disks for vmware
+                    !HypervisorType.VMware.equals(vm.getHypervisorType())) {
+                throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
+            }
         }

         // Check that Vm does not have VM Snapshots
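Note that inside the new block the inner vols.size() > 1 test is always true, since the outer if already established it, so the guard reduces to the hypervisor test alone. An equivalent, flatter form of the committed lines (a rewrite for illustration, not the committed text):

    // Multiple attached disks are only tolerated when the VM runs on VMware.
    if (vols.size() > 1 && !HypervisorType.VMware.equals(vm.getHypervisorType())) {
        throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
    }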
@ -5078,6 +5084,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
             throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM");
         }

+        checkDestinationHypervisorType(destPool, vm);
+
+        _itMgr.storageMigration(vm.getUuid(), destPool);
+        return _vmDao.findById(vm.getId());
+
+    }
+
+    private void checkDestinationHypervisorType(StoragePool destPool, VMInstanceVO vm) {
         HypervisorType destHypervisorType = destPool.getHypervisor();
         if (destHypervisorType == null) {
             destHypervisorType = _clusterDao.findById(
@ -5087,8 +5101,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
         if (vm.getHypervisorType() != destHypervisorType && destHypervisorType != HypervisorType.Any) {
             throw new InvalidParameterValueException("hypervisor is not compatible: dest: " + destHypervisorType.toString() + ", vm: " + vm.getHypervisorType().toString());
         }
-        _itMgr.storageMigration(vm.getUuid(), destPool);
-        return _vmDao.findById(vm.getId());

     }

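Read together with the previous hunk, the extraction moves storageMigration() and the DAO lookup back into the caller and leaves a pure validation helper behind. Piecing the two hunks together, the helper presumably ends up roughly as below; the _clusterDao.findById() call is truncated in this view, so its argument and the accessor on its result are guesses:

    private void checkDestinationHypervisorType(StoragePool destPool, VMInstanceVO vm) {
        HypervisorType destHypervisorType = destPool.getHypervisor();
        if (destHypervisorType == null) {
            // hypothetical completion of the truncated lookup: fall back to the cluster's hypervisor
            destHypervisorType = _clusterDao.findById(destPool.getClusterId()).getHypervisorType();
        }
        if (vm.getHypervisorType() != destHypervisorType && destHypervisorType != HypervisorType.Any) {
            throw new InvalidParameterValueException("hypervisor is not compatible: dest: " + destHypervisorType.toString() + ", vm: " + vm.getHypervisorType().toString());
        }
    }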
@ -5144,12 +5156,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
             throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported");
         }

-        if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM)
-                && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv)
-                && !vm.getHypervisorType().equals(HypervisorType.LXC) && !vm.getHypervisorType().equals(HypervisorType.Simulator)
-                && !vm.getHypervisorType().equals(HypervisorType.Ovm3)) {
+        if (!isOnSupportedHypevisorForMigration(vm)) {
             if (s_logger.isDebugEnabled()) {
-                s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM.");
+                s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM from hypervisor type " + vm.getHypervisorType());
             }
             throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only");
         }
@ -5227,6 +5236,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
         }
     }

+    private boolean isOnSupportedHypevisorForMigration(VMInstanceVO vm) {
+        return (vm.getHypervisorType().equals(HypervisorType.XenServer) ||
+                vm.getHypervisorType().equals(HypervisorType.VMware) ||
+                vm.getHypervisorType().equals(HypervisorType.KVM) ||
+                vm.getHypervisorType().equals(HypervisorType.Ovm) ||
+                vm.getHypervisorType().equals(HypervisorType.Hyperv) ||
+                vm.getHypervisorType().equals(HypervisorType.LXC) ||
+                vm.getHypervisorType().equals(HypervisorType.Simulator) ||
+                vm.getHypervisorType().equals(HypervisorType.Ovm3));
+    }
+
     private boolean checkIfHostIsDedicated(HostVO host) {
         long hostId = host.getId();
         DedicatedResourceVO dedicatedHost = _dedicatedDao.findByHostId(hostId);
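The new helper is a straight extraction of the old condition chain. If a terser form were wanted, an EnumSet membership test expresses the same whitelist (a sketch, not the committed code; HypervisorType is an enum, so java.util.EnumSet applies):

    private static final EnumSet<HypervisorType> MIGRATION_SUPPORTED_HYPERVISORS = EnumSet.of(
            HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM,
            HypervisorType.Ovm, HypervisorType.Hyperv, HypervisorType.LXC,
            HypervisorType.Simulator, HypervisorType.Ovm3);

    private boolean isOnSupportedHypevisorForMigration(VMInstanceVO vm) {
        return MIGRATION_SUPPORTED_HYPERVISORS.contains(vm.getHypervisorType());
    }

java.util.EnumSet would need importing; the lookup is O(1) instead of a chain of equals() calls, though the difference is negligible here.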
@ -5469,7 +5489,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
             throw new InvalidParameterValueException("Unable to find the vm by id " + vmId);
         }

+        // OfflineVmwareMigration: this would be it ;) if multiple paths exist: unify
         if (vm.getState() != State.Running) {
+            // OfflineVmwareMigration: and not vmware
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("VM is not Running, unable to migrate the vm " + vm);
             }
@ -5482,6 +5504,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
             throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported");
         }

+        // OfflineVmwareMigration: this condition is too complicated. (already a method somewhere)
         if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM)
                 && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv)
                 && !vm.getHypervisorType().equals(HypervisorType.Simulator)) {
@ -1004,7 +1004,7 @@ public class VolumeApiServiceImplTest {
         StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
         Mockito.doReturn("A").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);

-        boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
+        boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);

         Assert.assertFalse(result);
     }
@ -1017,7 +1017,7 @@ public class VolumeApiServiceImplTest {
         StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
         Mockito.doReturn("A,B,C,D,X,Y").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);

-        boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
+        boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);

         Assert.assertTrue(result);
     }
@ -1030,7 +1030,7 @@ public class VolumeApiServiceImplTest {
         StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
         Mockito.doReturn("A,B,C,D,X,Y").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);

-        boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
+        boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);

         Assert.assertTrue(result);
     }
@ -1043,7 +1043,7 @@ public class VolumeApiServiceImplTest {
         StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
         Mockito.doReturn("").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);

-        boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
+        boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);

         Assert.assertFalse(result);
     }
@ -1056,7 +1056,7 @@ public class VolumeApiServiceImplTest {
         StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
         Mockito.doReturn("").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);

-        boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
+        boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);

         Assert.assertTrue(result);
     }
@ -1069,7 +1069,7 @@ public class VolumeApiServiceImplTest {
         StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
         Mockito.doReturn("C,D").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);

-        boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
+        boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);

         Assert.assertFalse(result);
     }
@ -1082,7 +1082,7 @@ public class VolumeApiServiceImplTest {
         StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
         Mockito.doReturn("A").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);

-        boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
+        boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);

         Assert.assertTrue(result);
     }
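The seven renamed tests vary only in the mocked pool tags and the expected result. For illustration, the same matrix can be driven through one data-table test against the hedged poolSupportsOfferingTags() sketch shown earlier on this page; the offering-tag column below is hypothetical, since the mocked diskOfferingVoMock values are not visible in this excerpt:

    import org.junit.Assert;
    import org.junit.Test;

    public class TagMatchTableSketchTest {
        @Test
        public void poolTagsVersusOfferingTags() {
            Object[][] cases = {
                    // pool tags CSV, offering tags CSV (hypothetical), expected
                    {"A",            "A,B", false},
                    {"A,B,C,D,X,Y",  "A,B", true},
                    {"",             "A",   false},
                    {"",             "",    true},
                    {"C,D",          "A",   false},
                    {"A",            "A",   true},
            };
            for (Object[] c : cases) {
                Assert.assertEquals("pool=" + c[0] + " offering=" + c[1],
                        c[2], TagMatchSketch.poolSupportsOfferingTags((String) c[0], (String) c[1]));
            }
        }
    }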
@ -16,21 +16,19 @@
 # under the License.
 """ BVT tests for Primary Storage
 """
-#Import Local Modules
-import marvin
+# Import System modules
+# Import Local Modules
 from marvin.cloudstackTestCase import *
-from marvin.cloudstackAPI import *
-from marvin.lib.utils import *
 from marvin.lib.base import *
 from marvin.lib.common import *
-from nose.plugins.attrib import attr
-import logging
 from marvin.lib.decoratorGenerators import skipTestIf
+from marvin.lib.utils import *
+from nose.plugins.attrib import attr
+
-#Import System modules
-import time
 _multiprocess_shared_ = True


 class TestPrimaryStorageServices(cloudstackTestCase):

     def setUp(self):
@ -49,14 +47,14 @@ class TestPrimaryStorageServices(cloudstackTestCase):

     def tearDown(self):
         try:
-            #Clean up, terminate the created templates
+            # Clean up, terminate the created templates
             cleanup_resources(self.apiclient, self.cleanup)

         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return

-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
     def test_01_primary_storage_nfs(self):
         """Test primary storage pools - XEN, KVM, VMWare. Not Supported for hyperv
         """
@ -64,39 +62,36 @@ class TestPrimaryStorageServices(cloudstackTestCase):
         if self.hypervisor.lower() in ["hyperv"]:
             raise self.skipTest("NFS primary storage not supported for Hyper-V")


         # Validate the following:
         # 1. List Clusters
         # 2. verify that the cluster is in 'Enabled' allocation state
         # 3. verify that the host is added successfully and
         #    in Up state with listHosts api response

-        #Create NFS storage pools with on XEN/KVM/VMWare clusters
+        # Create NFS storage pools with on XEN/KVM/VMWare clusters


         clusters = list_clusters(
             self.apiclient,
             zoneid=self.zone.id
         )
-        assert isinstance(clusters,list) and len(clusters)>0
+        assert isinstance(clusters, list) and len(clusters) > 0
         for cluster in clusters:
-            #Host should be present before adding primary storage
+            # Host should be present before adding primary storage
             list_hosts_response = list_hosts(
                 self.apiclient,
                 clusterid=cluster.id
             )
             self.assertEqual(
                 isinstance(list_hosts_response, list),
                 True,
                 "Check list response returns a valid list"
             )

             self.assertNotEqual(
                 len(list_hosts_response),
                 0,
                 "Check list Hosts in the cluster: " + cluster.name
             )

             storage = StoragePool.create(self.apiclient,
                                          self.services["nfs"],
@ -112,53 +107,52 @@ class TestPrimaryStorageServices(cloudstackTestCase):
                 storage.state,
                 'Up',
                 "Check primary storage state "
             )

             self.assertEqual(
                 storage.type,
                 'NetworkFilesystem',
                 "Check storage pool type "
             )

-            #Verify List Storage pool Response has newly added storage pool
+            # Verify List Storage pool Response has newly added storage pool
             storage_pools_response = list_storage_pools(
                 self.apiclient,
                 id=storage.id,
             )
             self.assertEqual(
                 isinstance(storage_pools_response, list),
                 True,
                 "Check list response returns a valid list"
             )
             self.assertNotEqual(
                 len(storage_pools_response),
                 0,
                 "Check list Hosts response"
             )

             storage_response = storage_pools_response[0]
             self.assertEqual(
                 storage_response.id,
                 storage.id,
                 "Check storage pool ID"
             )
             self.assertEqual(
                 storage.type,
                 storage_response.type,
                 "Check storage pool type "
             )
             # Call cleanup for reusing primary storage
             cleanup_resources(self.apiclient, self.cleanup)
             self.cleanup = []
         return

-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
     def test_01_primary_storage_iscsi(self):
         """Test primary storage pools - XEN. Not Supported for kvm,hyperv,vmware
         """

-        if self.hypervisor.lower() in ["kvm","hyperv", "vmware", "lxc"]:
+        if self.hypervisor.lower() in ["kvm", "hyperv", "vmware", "lxc"]:
            raise self.skipTest("iscsi primary storage not supported on kvm, VMWare, Hyper-V, or LXC")

         if not self.services["configurableData"]["iscsi"]["url"]:
@ -175,26 +169,24 @@ class TestPrimaryStorageServices(cloudstackTestCase):
             self.apiclient,
             zoneid=self.zone.id
         )
-        assert isinstance(clusters,list) and len(clusters)>0
+        assert isinstance(clusters, list) and len(clusters) > 0
         for cluster in clusters:
-            #Host should be present before adding primary storage
+            # Host should be present before adding primary storage
             list_hosts_response = list_hosts(
                 self.apiclient,
                 clusterid=cluster.id
             )
             self.assertEqual(
                 isinstance(list_hosts_response, list),
                 True,
                 "Check list response returns a valid list"
             )

             self.assertNotEqual(
                 len(list_hosts_response),
                 0,
                 "Check list Hosts in the cluster: " + cluster.name
             )


             storage = StoragePool.create(self.apiclient,
                                          self.services["configurableData"]["iscsi"],
@ -210,58 +202,58 @@ class TestPrimaryStorageServices(cloudstackTestCase):
                 storage.state,
                 'Up',
                 "Check primary storage state "
             )

             self.assertEqual(
                 storage.type,
                 'IscsiLUN',
                 "Check storage pool type "
             )

-            #Verify List Storage pool Response has newly added storage pool
+            # Verify List Storage pool Response has newly added storage pool
             storage_pools_response = list_storage_pools(
                 self.apiclient,
                 id=storage.id,
             )
             self.assertEqual(
                 isinstance(storage_pools_response, list),
                 True,
                 "Check list response returns a valid list"
             )
             self.assertNotEqual(
                 len(storage_pools_response),
                 0,
                 "Check list Hosts response"
             )

             storage_response = storage_pools_response[0]
             self.assertEqual(
                 storage_response.id,
                 storage.id,
                 "Check storage pool ID"
             )
             self.assertEqual(
                 storage.type,
                 storage_response.type,
                 "Check storage pool type "
             )
             # Call cleanup for reusing primary storage
             cleanup_resources(self.apiclient, self.cleanup)
             self.cleanup = []

         return

-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
     def test_01_add_primary_storage_disabled_host(self):
         """Test add primary storage pool with disabled host
         """

-        #Disable a host
+        # Disable a host
         clusters = list_clusters(
             self.apiclient,
             zoneid=self.zone.id
         )
-        assert isinstance(clusters,list) and len(clusters)>0
+        assert isinstance(clusters, list) and len(clusters) > 0
         for cluster in clusters:

             list_hosts_response = list_hosts(
@ -269,15 +261,14 @@ class TestPrimaryStorageServices(cloudstackTestCase):
                 clusterid=cluster.id,
                 type="Routing"
             )
-            assert isinstance(list_hosts_response,list)
+            assert isinstance(list_hosts_response, list)
             if len(list_hosts_response) < 2:
                 continue
             selected_cluster = cluster
             selected_host = list_hosts_response[0]
             Host.update(self.apiclient, id=selected_host.id, allocationstate="Disable")

-            #create a pool
+            # create a pool
             storage_pool_2 = StoragePool.create(
                 self.apiclient,
                 self.services["nfs2"],
@ -285,24 +276,23 @@ class TestPrimaryStorageServices(cloudstackTestCase):
                 zoneid=self.zone.id,
                 podid=self.pod.id
             )
-            #self.cleanup.append(storage_pool_2)
+            # self.cleanup.append(storage_pool_2)

-            #Enable host and disable others
+            # Enable host and disable others
             Host.update(self.apiclient, id=selected_host.id, allocationstate="Enable")
-            for host in list_hosts_response :
-                if(host.id == selected_host.id) :
+            for host in list_hosts_response:
+                if (host.id == selected_host.id):
                     continue
                 Host.update(self.apiclient, id=host.id, allocationstate="Disable")

-            #put other pools in maintenance
-            storage_pool_list = StoragePool.list(self.apiclient, zoneid = self.zone.id)
-            for pool in storage_pool_list :
-                if(pool.id == storage_pool_2.id) :
+            # put other pools in maintenance
+            storage_pool_list = StoragePool.list(self.apiclient, zoneid=self.zone.id)
+            for pool in storage_pool_list:
+                if (pool.id == storage_pool_2.id):
                     continue
-                StoragePool.update(self.apiclient,id=pool.id, enabled=False)
+                StoragePool.update(self.apiclient, id=pool.id, enabled=False)

-            #deployvm
+            # deployvm
             try:
                 # Create Account
                 account = Account.create(
@ -329,20 +319,20 @@ class TestPrimaryStorageServices(cloudstackTestCase):
                 self.cleanup.append(self.virtual_machine)
                 self.cleanup.append(account)
             finally:
-                #cancel maintenance
-                for pool in storage_pool_list :
-                    if(pool.id == storage_pool_2.id) :
+                # cancel maintenance
+                for pool in storage_pool_list:
+                    if (pool.id == storage_pool_2.id):
                         continue
-                    StoragePool.update(self.apiclient,id=pool.id, enabled=True)
-                #Enable all hosts
-                for host in list_hosts_response :
-                    if(host.id == selected_host.id) :
+                    StoragePool.update(self.apiclient, id=pool.id, enabled=True)
+                # Enable all hosts
+                for host in list_hosts_response:
+                    if (host.id == selected_host.id):
                         continue
                     Host.update(self.apiclient, id=host.id, allocationstate="Enable")

                 cleanup_resources(self.apiclient, self.cleanup)
                 self.cleanup = []
-                StoragePool.enableMaintenance(self.apiclient,storage_pool_2.id)
+                StoragePool.enableMaintenance(self.apiclient, storage_pool_2.id)
                 time.sleep(30);
                 cmd = deleteStoragePool.deleteStoragePoolCmd()
                 cmd.id = storage_pool_2.id
@ -355,12 +345,14 @@ class TestPrimaryStorageServices(cloudstackTestCase):
 class StorageTagsServices:
     """Test Storage Tags Data Class.
     """

     def __init__(self):
         self.storage_tags = {
-            "a" : "NFS-A",
-            "b" : "NFS-B"
+            "a": "NFS-A",
+            "b": "NFS-B"
         }

+
 class TestStorageTags(cloudstackTestCase):

     @classmethod
@ -390,7 +382,6 @@ class TestStorageTags(cloudstackTestCase):
         cls._cleanup = []

         if not cls.hypervisorNotSupported:
-
             cls.clusters = list_clusters(
                 cls.apiclient,
                 zoneid=cls.zone.id
@ -399,13 +390,13 @@ class TestStorageTags(cloudstackTestCase):

             # Create PS with Storage Tag
             cls.storage_pool_1 = StoragePool.create(cls.apiclient,
                                                     cls.services["nfs"],
                                                     clusterid=cls.clusters[0].id,
                                                     zoneid=cls.zone.id,
                                                     podid=cls.pod.id,
                                                     tags=cls.services["storage_tags"]["a"]
                                                     )
-            #PS not appended to _cleanup, it is removed on tearDownClass before cleaning up resources
+            # PS not appended to _cleanup, it is removed on tearDownClass before cleaning up resources
             assert cls.storage_pool_1.state == 'Up'
             storage_pools_response = list_storage_pools(cls.apiclient,
                                                         id=cls.storage_pool_1.id)
@ -525,23 +516,23 @@ class TestStorageTags(cloudstackTestCase):

         # Create V-1 using DO-1
         self.volume_1 = Volume.create(
             self.apiclient,
             self.services,
             zoneid=self.zone.id,
             account=self.account.name,
             domainid=self.account.domainid,
             diskofferingid=self.disk_offering_1.id
         )
         self.cleanup.append(self.volume_1)

         # Create V-2 using DO-2
         self.volume_2 = Volume.create(
             self.apiclient,
             self.services,
             zoneid=self.zone.id,
             account=self.account.name,
             domainid=self.account.domainid,
             diskofferingid=self.disk_offering_2.id
         )
         self.cleanup.append(self.volume_2)

@ -564,7 +555,7 @@ class TestStorageTags(cloudstackTestCase):
         self.assertEquals(None, vm_1_volumes, "Check that volume V-2 has not been attached to VM-1")

         # Attach V_1 to VM_1
-        self.virtual_machine_1.attach_volume(self.apiclient,self.volume_1)
+        self.virtual_machine_1.attach_volume(self.apiclient, self.volume_1)
         vm_1_volumes = Volume.list(
             self.apiclient,
             virtualmachineid=self.virtual_machine_1.id,
@ -628,12 +619,12 @@ class TestStorageTags(cloudstackTestCase):

         # Create PS-2 using Storage Tag
         storage_pool_2 = StoragePool.create(self.apiclient,
                                             self.services["nfs2"],
                                             clusterid=self.clusters[0].id,
                                             zoneid=self.zone.id,
                                             podid=self.pod.id,
                                             tags=self.services["storage_tags"]["a"]
                                             )
         self.cleanup.append(storage_pool_2)
         assert storage_pool_2.state == 'Up'
         storage_pools_response = list_storage_pools(self.apiclient,
@ -667,7 +658,7 @@ class TestStorageTags(cloudstackTestCase):
             self.apiclient,
             id=vol.id
         )
-        pools_suitable = filter(lambda p : p.suitableformigration, pools_response)
+        pools_suitable = filter(lambda p: p.suitableformigration, pools_response)

         self.debug("Suitable storage pools found: %s" % len(pools_suitable))
         self.assertEquals(1, len(pools_suitable), "Check that there is only one item on the list")
@ -685,7 +676,7 @@ class TestStorageTags(cloudstackTestCase):
             self.apiclient,
             id=vol.id
         )
-        pools_suitable = filter(lambda p : p.suitableformigration, pools_response)
+        pools_suitable = filter(lambda p: p.suitableformigration, pools_response)

         self.debug("Suitable storage pools found: %s" % len(pools_suitable))
         self.assertEquals(0, len(pools_suitable), "Check that there is no migration option for volume")
(File diff suppressed because it is too large.)
@ -73,6 +73,14 @@ public class StringUtils {
     public static String join(final String delimiter, final Object... components) {
         return org.apache.commons.lang.StringUtils.join(components, delimiter);
     }
+    /**
+     * @deprecated
+     * Please use org.apache.commons.lang.StringUtils.isBlank() as a replacement
+     */
+    @Deprecated
+    public static boolean isBlank(String str) {
+        return org.apache.commons.lang.StringUtils.isBlank(str);
+    }

     /**
      * @deprecated
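Since the deprecated wrapper only delegates, call sites can migrate mechanically; for example:

    // Deprecated custom wrapper:
    boolean blank = com.cloud.utils.StringUtils.isBlank(name);
    // Recommended replacement:
    boolean blank2 = org.apache.commons.lang.StringUtils.isBlank(name);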
@ -447,6 +447,23 @@ public class VirtualMachineMO extends BaseMO {
         return false;
     }

+    public boolean changeDatastore(ManagedObjectReference morDataStore) throws Exception {
+        VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
+        relocateSpec.setDatastore(morDataStore);
+
+        ManagedObjectReference morTask = _context.getService().relocateVMTask(_mor, relocateSpec, null);
+
+        boolean result = _context.getVimClient().waitForTask(morTask);
+        if (result) {
+            _context.waitForTaskProgressDone(morTask);
+            return true;
+        } else {
+            s_logger.error("VMware change datastore relocateVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask));
+        }
+
+        return false;
+    }
+
     public boolean relocate(ManagedObjectReference morTargetHost) throws Exception {
         VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
         relocateSpec.setHost(morTargetHost);
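The new changeDatastore() mirrors the existing relocate() but targets a datastore instead of a host, which is the vSphere primitive behind the offline storage migration in this change. A hedged sketch of how a caller might drive it; the lookup helpers and names below are illustrative, not APIs confirmed by this excerpt:

    // Illustrative caller: move a stopped VM's files to another datastore.
    VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName);            // hypothetical lookup
    ManagedObjectReference morTargetDs = hyperHost.findDatastore(targetDs); // hypothetical lookup
    if (!vmMo.changeDatastore(morTargetDs)) {
        throw new CloudRuntimeException("Failed to relocate " + vmName + " to " + targetDs); // hypothetical handling
    }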