Mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)

Commit a32f19b34d: Merge branch '4.17'
@ -346,8 +346,7 @@ public class CreateNetworkCmd extends BaseCmd implements UserCmd {
@Override
// an exception thrown by createNetwork() will be caught by the dispatcher.
public
void execute() throws InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException {
public void execute() throws InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException {
Network result = _networkService.createGuestNetwork(this);
if (result != null) {
NetworkResponse response = _responseGenerator.createNetworkResponse(getResponseView(), result);
@ -100,6 +100,7 @@ public class CreateVMSnapshotCmd extends BaseAsyncCreateCmd {
if (vmsnapshot != null) {
setEntityId(vmsnapshot.getId());
setEntityUuid(vmsnapshot.getUuid());
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create vm snapshot");
}
@ -210,6 +210,14 @@ public class NicResponse extends BaseResponse {
this.extraDhcpOptions = extraDhcpOptions;
}

@SerializedName(ApiConstants.VPC_ID)
@Param(description = "Id of the vpc to which the nic belongs")
private String vpcId;

@SerializedName(ApiConstants.VPC_NAME)
@Param(description = "name of the vpc to which the nic belongs")
private String vpcName;

@Override
public int hashCode() {
final int prime = 31;
@ -364,4 +372,20 @@ public class NicResponse extends BaseResponse {
public void setIpAddresses(List<String> ipAddresses) {
this.ipAddresses = ipAddresses;
}

public String getVpcId() {
return vpcId;
}

public void setVpcId(String vpcId) {
this.vpcId = vpcId;
}

public String getVpcName() {
return vpcName;
}

public void setVpcName(String vpcName) {
this.vpcName = vpcName;
}
}
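The two new NicResponse fields rely on the usual @SerializedName/@Param response annotations. For readers unfamiliar with how the annotation drives the serialized API output, here is a minimal, self-contained sketch; the class, field, and key names are illustrative, not taken from this commit:

```java
import com.google.gson.Gson;
import com.google.gson.annotations.SerializedName;

// Minimal stand-in for a response object: the annotation value, not the
// Java field name, becomes the JSON key in the serialized response.
class NicVpcInfo {
    @SerializedName("vpcid")
    private final String vpcId;

    @SerializedName("vpcname")
    private final String vpcName;

    NicVpcInfo(String vpcId, String vpcName) {
        this.vpcId = vpcId;
        this.vpcName = vpcName;
    }
}

public class NicVpcInfoDemo {
    public static void main(String[] args) {
        // prints {"vpcid":"vpc-uuid","vpcname":"my-vpc"}
        System.out.println(new Gson().toJson(new NicVpcInfo("vpc-uuid", "my-vpc")));
    }
}
```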
@ -33,6 +33,8 @@ import com.cloud.storage.dao.VolumeDao;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;

import org.apache.log4j.Logger;

public class StorageUtil {
@Inject private ClusterDao clusterDao;
@Inject private HostDao hostDao;
@ -40,6 +42,18 @@ public class StorageUtil {
@Inject private VMInstanceDao vmInstanceDao;
@Inject private VolumeDao volumeDao;

public static void traceLogStoragePools(List<StoragePool> poolList, Logger logger, String initialMessage) {
if (logger.isTraceEnabled()) {
StringBuilder pooltable = new StringBuilder();
pooltable.append(initialMessage);
int i = 1;
for (StoragePool pool : poolList) {
pooltable.append("\nno ").append(i).append(": ").append(pool.getName()).append("/").append(pool.getUuid());
}
logger.trace(pooltable.toString());
}
}

private Long getClusterId(Long hostId) {
if (hostId == null) {
return null;
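The new helper builds one multi-line trace message instead of logging per pool, so the listing stays together in the log. A minimal, self-contained sketch of the same idea follows; the names are illustrative only, and the sketch increments the running counter explicitly, which the hunk as shown does not appear to do:

```java
import java.util.List;
import org.apache.log4j.Logger;

final class PoolTraceDemo {
    private static final Logger LOG = Logger.getLogger(PoolTraceDemo.class);

    // Build the whole table first, then emit a single trace line, so the
    // output is not interleaved with other threads' log statements.
    static void traceLog(List<String> poolNames, String initialMessage) {
        if (!LOG.isTraceEnabled()) {
            return; // skip the string building cost when trace is off
        }
        StringBuilder table = new StringBuilder(initialMessage);
        int i = 1;
        for (String name : poolNames) {
            table.append("\nno ").append(i++).append(": ").append(name);
        }
        LOG.trace(table.toString());
    }
}
```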
@ -37,6 +37,7 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;

import com.cloud.storage.StorageUtil;
import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin;
import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
@ -161,6 +162,7 @@ import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.UserVmDetailsDao;

import org.apache.cloudstack.snapshot.SnapshotHelper;
import org.jetbrains.annotations.Nullable;

public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable {
@ -335,15 +337,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati

@Override
public StoragePool findStoragePool(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set<StoragePool> avoid) {
Long podId = null;
if (pod != null) {
podId = pod.getId();
} else if (clusterId != null) {
Cluster cluster = _entityMgr.findById(Cluster.class, clusterId);
if (cluster != null) {
podId = cluster.getPodId();
}
}
Long podId = retrievePod(pod, clusterId);

VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
for (StoragePoolAllocator allocator : _storagePoolAllocators) {
@ -356,8 +350,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati

final List<StoragePool> poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, StoragePoolAllocator.RETURN_UPTO_ALL);
if (poolList != null && !poolList.isEmpty()) {
StorageUtil.traceLogStoragePools(poolList, s_logger, "pools to choose from: ");
// Check if the preferred storage pool can be used. If yes, use it.
Optional<StoragePool> storagePool = getPreferredStoragePool(poolList, vm);
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent()));
}

return (storagePool.isPresent()) ? (StoragePool) this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary) :
(StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
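The return statement above is the common Optional fallback idiom: use the preferred pool if one is present, otherwise fall back to the head of the allocator's list. A minimal sketch of the same selection logic in isolation, with the domain types simplified to strings (purely illustrative):

```java
import java.util.List;
import java.util.Optional;

final class PreferredPoolDemo {
    // Pick the first pool whose name carries the "preferred" marker,
    // otherwise fall back to the first pool the allocator returned.
    static String choosePool(List<String> allocated, String preferredMarker) {
        Optional<String> preferred = allocated.stream()
                .filter(name -> name.contains(preferredMarker))
                .findFirst();
        return preferred.orElse(allocated.get(0));
    }

    public static void main(String[] args) {
        System.out.println(choosePool(List.of("nfs-1", "ssd-preferred"), "preferred")); // ssd-preferred
        System.out.println(choosePool(List.of("nfs-1", "nfs-2"), "preferred"));         // nfs-1
    }
}
```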
@ -366,7 +364,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
return null;
}

public List<StoragePool> findStoragePoolsForVolumeWithNewDiskOffering(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set<StoragePool> avoid) {
@Nullable
private Long retrievePod(Pod pod, Long clusterId) {
Long podId = null;
if (pod != null) {
podId = pod.getId();
@ -376,6 +375,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
podId = cluster.getPodId();
}
}
return podId;
}

public List<StoragePool> findStoragePoolsForVolumeWithNewDiskOffering(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set<StoragePool> avoid) {
Long podId = retrievePod(pod, clusterId);

VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
List<StoragePool> suitablePools = new ArrayList<>();
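The refactor collapses three copies of the pod-resolution block into a single @Nullable helper. A compact, self-contained sketch of that pattern, with the entity manager replaced by a plain map and names that are stand-ins rather than the project's classes:

```java
import java.util.Map;

final class RetrievePodDemo {
    // Stand-in lookup (cluster id -> pod id) in place of the entity manager.
    private static final Map<Long, Long> CLUSTER_TO_POD = Map.of(10L, 1L, 11L, 2L);

    // Prefer the explicitly supplied pod; otherwise derive it from the cluster,
    // and return null when neither is available (hence "nullable").
    static Long retrievePod(Long explicitPodId, Long clusterId) {
        if (explicitPodId != null) {
            return explicitPodId;
        }
        if (clusterId != null) {
            return CLUSTER_TO_POD.get(clusterId); // may still be null
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(retrievePod(5L, 10L));    // 5
        System.out.println(retrievePod(null, 11L));  // 2
        System.out.println(retrievePod(null, null)); // null
    }
}
```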
@ -398,15 +402,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati

@Override
public StoragePool findChildDataStoreInDataStoreCluster(DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, Long datastoreClusterId) {
Long podId = null;
if (pod != null) {
podId = pod.getId();
} else if (clusterId != null) {
Cluster cluster = _entityMgr.findById(Cluster.class, clusterId);
if (cluster != null) {
podId = cluster.getPodId();
}
}
Long podId = retrievePod(pod, clusterId);
List<StoragePoolVO> childDatastores = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(datastoreClusterId);
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
@ -1011,12 +1007,18 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException {
VirtualMachineTemplate rootDiskTmplt = _entityMgr.findById(VirtualMachineTemplate.class, vm.getTemplateId());
DataCenter dcVO = _entityMgr.findById(DataCenter.class, vm.getDataCenterId());
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("storage-pool %s/%s is associated with pod %d",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId()));
}
Long podId = storagePool.getPodId() != null ? storagePool.getPodId() : vm.getPodIdToDeployIn();
Pod pod = _entityMgr.findById(Pod.class, podId);

ServiceOffering svo = _entityMgr.findById(ServiceOffering.class, vm.getServiceOfferingId());
DiskOffering diskVO = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
Long clusterId = storagePool.getClusterId();
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("storage-pool %s/%s is associated with cluster %d",storagePool.getName(), storagePool.getUuid(), clusterId));
}

VolumeInfo vol = null;
if (volume.getState() == Volume.State.Allocated) {
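Both new trace statements follow the same guard-then-format pattern, so the String.format work is only done when TRACE is actually enabled. A tiny sketch of the idiom, with an illustrative logger and message:

```java
import org.apache.log4j.Logger;

final class GuardedTraceDemo {
    private static final Logger LOG = Logger.getLogger(GuardedTraceDemo.class);

    static void logPoolAssociation(String poolName, String poolUuid, Long podId) {
        // The isTraceEnabled() check keeps String.format off the hot path
        // when trace logging is switched off.
        if (LOG.isTraceEnabled()) {
            LOG.trace(String.format("storage-pool %s/%s is associated with pod %d", poolName, poolUuid, podId));
        }
    }
}
```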
@ -17,6 +17,7 @@
package org.apache.cloudstack.storage.allocator;

import java.math.BigDecimal;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -31,6 +32,7 @@ import com.cloud.storage.StoragePoolStatus;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;

import com.cloud.utils.Pair;
@ -74,6 +76,11 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
@Inject private StorageUtil storageUtil;
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;

/**
* make sure shuffled lists of Pools are really shuffled
*/
private SecureRandom secureRandom = new SecureRandom();

@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
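The new field exists so that pool shuffling passes an explicitly constructed SecureRandom to Collections.shuffle instead of relying on the overload that uses a default java.util.Random. A minimal sketch of the difference in call shape (list contents are illustrative):

```java
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

final class ShuffleDemo {
    private static final SecureRandom SECURE_RANDOM = new SecureRandom();

    public static void main(String[] args) {
        List<String> pools = new ArrayList<>(Arrays.asList("pool-1", "pool-2", "pool-3", "pool-4"));

        // Default overload: an internal java.util.Random instance is used.
        Collections.shuffle(pools);

        // Variant used after this change: the caller supplies the RNG.
        Collections.shuffle(pools, SECURE_RANDOM);

        System.out.println(pools);
    }
}
```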
@ -169,25 +176,26 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement

@Override
public List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("reordering pools");
}
if (pools == null) {
return null;
}
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("reordering %d pools", pools.size()));
}
Account account = null;
if (vmProfile.getVirtualMachine() != null) {
account = vmProfile.getOwner();
}

if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
// Shuffle this so that we don't check the pools in the same order.
Collections.shuffle(pools);
} else if (allocationAlgorithm.equals("userdispersing")) {
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
} else if(allocationAlgorithm.equals("firstfitleastconsumed")){
pools = reorderPoolsByCapacity(plan, pools);
}
pools = reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);

if (vmProfile.getVirtualMachine() == null) {
s_logger.trace("The VM is null, skipping pools reordering by disk provisioning type.");
if (s_logger.isTraceEnabled()) {
s_logger.trace("The VM is null, skipping pools reordering by disk provisioning type.");
}
return pools;
}
@ -199,6 +207,33 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
return pools;
}

List<StoragePool> reorderStoragePoolsBasedOnAlgorithm(List<StoragePool> pools, DeploymentPlan plan, Account account) {
if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
reorderRandomPools(pools);
} else if (StringUtils.equalsAny(allocationAlgorithm, "userdispersing", "firstfitleastconsumed")) {
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("Using reordering algorithm [%s]", allocationAlgorithm));
}

if (allocationAlgorithm.equals("userdispersing")) {
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
} else {
pools = reorderPoolsByCapacity(plan, pools);
}
}
return pools;
}

void reorderRandomPools(List<StoragePool> pools) {
StorageUtil.traceLogStoragePools(pools, s_logger, "pools to choose from: ");
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("Shuffle this so that we don't check the pools in the same order. Algorithm == '%s' (or no account?)", allocationAlgorithm));
}
StorageUtil.traceLogStoragePools(pools, s_logger, "pools to shuffle: ");
Collections.shuffle(pools, secureRandom);
StorageUtil.traceLogStoragePools(pools, s_logger, "shuffled list of pools to choose from: ");
}

private List<StoragePool> reorderPoolsByDiskProvisioningType(List<StoragePool> pools, DiskProfile diskProfile) {
if (diskProfile != null && diskProfile.getProvisioningType() != null && !diskProfile.getProvisioningType().equals(Storage.ProvisioningType.THIN)) {
List<StoragePool> reorderedPools = new ArrayList<>();
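The extracted reorderStoragePoolsBasedOnAlgorithm keeps the dispatch on the allocation algorithm setting in one place, which is what makes the branch unit-testable in the new test class further down. A stripped-down sketch of the dispatch, with the real reordering strategies replaced by placeholder lambdas (everything here is illustrative, not the project's API):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.function.UnaryOperator;

final class ReorderDispatchDemo {
    // Placeholder strategies standing in for shuffle / by-volumes / by-capacity.
    static List<String> reorder(String algorithm, List<String> pools,
                                UnaryOperator<List<String>> byVolumes,
                                UnaryOperator<List<String>> byCapacity) {
        if (algorithm.equals("random") || algorithm.equals("userconcentratedpod_random")) {
            Collections.shuffle(pools);
            return pools;
        } else if (algorithm.equals("userdispersing")) {
            return byVolumes.apply(pools);
        } else if (algorithm.equals("firstfitleastconsumed")) {
            return byCapacity.apply(pools);
        }
        return pools;
    }

    public static void main(String[] args) {
        List<String> pools = new ArrayList<>(List.of("a", "b", "c"));
        System.out.println(reorder("userdispersing", pools, p -> p, p -> p));
    }
}
```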
@ -0,0 +1,113 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.allocator;
|
||||
|
||||
|
||||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class AbstractStoragePoolAllocatorTest {
|
||||
|
||||
AbstractStoragePoolAllocator allocator = Mockito.spy(MockStorapoolAllocater.class);
|
||||
|
||||
@Mock
|
||||
DeploymentPlan plan;
|
||||
|
||||
@Mock
|
||||
Account account;
|
||||
private List<StoragePool> pools;
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
pools = new ArrayList<>();
|
||||
for (int i = 0 ; i < 10 ; ++i) {
|
||||
pools.add(new StoragePoolVO(i, "pool-"+i, "pool-"+i+"-uuid", Storage.StoragePoolType.NetworkFilesystem,
|
||||
1, 1l, 10000000000l, 10000000000l, "10.10.10.10",
|
||||
1000, "/spool/share-" + i));
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_random() {
|
||||
allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByCapacity(plan, pools);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
Mockito.verify(allocator, Mockito.times(1)).reorderRandomPools(pools);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_userdispersing() {
|
||||
allocator.allocationAlgorithm = "userdispersing";
|
||||
Mockito.doReturn(pools).when(allocator).reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByCapacity(plan, pools);
|
||||
Mockito.verify(allocator, Mockito.times(1)).reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderRandomPools(pools);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void reorderStoragePoolsBasedOnAlgorithm_firstfitleastconsumed() {
|
||||
allocator.allocationAlgorithm = "firstfitleastconsumed";
|
||||
Mockito.doReturn(pools).when(allocator).reorderPoolsByCapacity(plan, pools);
|
||||
allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
|
||||
Mockito.verify(allocator, Mockito.times(1)).reorderPoolsByCapacity(plan, pools);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
Mockito.verify(allocator, Mockito.times(0)).reorderRandomPools(pools);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void reorderRandomPools() {
|
||||
Set<Long> firstchoice = new HashSet<>();
|
||||
for (int i = 0; i <= 20; ++i) {
|
||||
allocator.reorderRandomPools(pools);
|
||||
firstchoice.add(pools.get(0).getId());
|
||||
}
|
||||
Assert.assertTrue(firstchoice.size() > 2);
|
||||
}
|
||||
}
|
||||
|
||||
class MockStorapoolAllocater extends AbstractStoragePoolAllocator {
|
||||
|
||||
@Override
|
||||
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, DeploymentPlanner.ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
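The reorderRandomPools test above asserts that 21 shuffles of 10 pools yield more than two distinct first elements. As a rough sanity check of why that assertion is safe, assuming only a fair shuffle: by a union bound over the 45 possible pairs of pools, the probability that all 21 draws land in some fixed pair (which covers every "at most two distinct heads" outcome) is tiny.

```java
// Upper bound on P(at most 2 distinct first pools over 21 fair shuffles of 10 pools):
// fix a pair of pools (45 ways) and require every draw to land inside that pair.
public class ShuffleBoundDemo {
    public static void main(String[] args) {
        double bound = 45 * Math.pow(2.0 / 10.0, 21);
        System.out.println(bound); // ~9.4e-14, so the test is effectively deterministic
    }
}
```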
@ -37,6 +37,8 @@ import java.util.stream.Collectors;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.api.query.dao.UserVmJoinDao;
|
||||
import com.cloud.network.vpc.VpcVO;
|
||||
import org.apache.cloudstack.acl.ControlledEntity;
|
||||
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
|
||||
import org.apache.cloudstack.affinity.AffinityGroup;
|
||||
@ -442,6 +444,8 @@ public class ApiResponseHelper implements ResponseGenerator {
|
||||
NetworkOfferingDao networkOfferingDao;
|
||||
@Inject
|
||||
Ipv6Service ipv6Service;
|
||||
@Inject
|
||||
UserVmJoinDao userVmJoinDao;
|
||||
|
||||
@Override
|
||||
public UserResponse createUserResponse(User user) {
|
||||
@ -4318,6 +4322,13 @@ public class ApiResponseHelper implements ResponseGenerator {
|
||||
response.setNsxLogicalSwitchPort(((NicVO)result).getNsxLogicalSwitchPortUuid());
|
||||
}
|
||||
}
|
||||
|
||||
UserVmJoinVO userVm = userVmJoinDao.findById(vm.getId());
|
||||
if (userVm != null && userVm.getVpcUuid() != null) {
|
||||
response.setVpcId(userVm.getVpcUuid());
|
||||
VpcVO vpc = _entityMgr.findByUuidIncludingRemoved(VpcVO.class, userVm.getVpcUuid());
|
||||
response.setVpcName(vpc.getName());
|
||||
}
|
||||
return response;
|
||||
}
|
||||
|
||||
|
||||
@ -28,6 +28,8 @@ import java.util.stream.Collectors;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.network.vpc.VpcVO;
|
||||
import com.cloud.network.vpc.dao.VpcDao;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
import org.apache.cloudstack.affinity.AffinityGroupResponse;
|
||||
import org.apache.cloudstack.annotation.AnnotationService;
|
||||
@ -88,6 +90,8 @@ public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation<UserVmJo
|
||||
@Inject
|
||||
private AnnotationDao annotationDao;
|
||||
@Inject
|
||||
private VpcDao vpcDao;
|
||||
@Inject
|
||||
UserStatisticsDao userStatsDao;
|
||||
|
||||
private final SearchBuilder<UserVmJoinVO> VmDetailSearch;
|
||||
@ -291,6 +295,12 @@ public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation<UserVmJo
|
||||
if (userVm.getGuestType() != null) {
|
||||
nicResponse.setType(userVm.getGuestType().toString());
|
||||
}
|
||||
|
||||
if (userVm.getVpcUuid() != null) {
|
||||
nicResponse.setVpcId(userVm.getVpcUuid());
|
||||
VpcVO vpc = vpcDao.findByUuidIncludingRemoved(userVm.getVpcUuid());
|
||||
nicResponse.setVpcName(vpc.getName());
|
||||
}
|
||||
nicResponse.setIsDefault(userVm.isDefaultNic());
|
||||
nicResponse.setDeviceId(String.valueOf(userVm.getNicDeviceId()));
|
||||
List<NicSecondaryIpVO> secondaryIps = ApiDBUtils.findNicSecondaryIps(userVm.getNicId());
|
||||
|
||||
@ -92,6 +92,7 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
|
||||
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
|
||||
import org.apache.cloudstack.utils.identity.ManagementServerNode;
|
||||
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
|
||||
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
|
||||
import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.commons.collections.MapUtils;
|
||||
@ -99,6 +100,8 @@ import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.commons.lang3.builder.ToStringBuilder;
|
||||
import org.apache.commons.lang3.builder.ToStringStyle;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
@ -339,6 +342,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
private static final long GiB_TO_BYTES = 1024 * 1024 * 1024;
|
||||
|
||||
private static final String CUSTOM_DISK_OFFERING_UNIQUE_NAME = "Cloud.com-Custom";
|
||||
private static final List<Volume.State> validAttachStates = Arrays.asList(Volume.State.Allocated, Volume.State.Ready, Volume.State.Uploaded);
|
||||
|
||||
protected VolumeApiServiceImpl() {
|
||||
_volStateMachine = Volume.State.getStateMachine();
|
||||
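The new validAttachStates constant replaces a chain of equality comparisons in attachVolumeToVM with a single contains() check. A minimal sketch of the same guard, with a stand-in enum and message rather than the project's types:

```java
import java.util.Arrays;
import java.util.List;

final class ValidStatesDemo {
    enum State { Allocated, Ready, Uploaded, Destroy }

    private static final List<State> VALID_ATTACH_STATES = Arrays.asList(State.Allocated, State.Ready, State.Uploaded);

    static void checkAttachable(State state) {
        // One membership test instead of several == comparisons.
        if (!VALID_ATTACH_STATES.contains(state)) {
            throw new IllegalArgumentException("Volume state must be in Allocated, Ready or in Uploaded state");
        }
    }

    public static void main(String[] args) {
        checkAttachable(State.Ready);      // passes
        // checkAttachable(State.Destroy); // would throw
    }
}
```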
@ -2082,6 +2086,16 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
}
|
||||
}
|
||||
}
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
String msg = "attaching volume %s/%s to a VM (%s/%s) with an existing volume %s/%s on primary storage %s";
|
||||
if (existingVolumeOfVm != null) {
|
||||
s_logger.trace(String.format(msg,
|
||||
volumeToAttach.getName(), volumeToAttach.getUuid(),
|
||||
vm.getName(), vm.getUuid(),
|
||||
existingVolumeOfVm.getName(), existingVolumeOfVm.getUuid(),
|
||||
existingVolumeOfVm.getPoolId()));
|
||||
}
|
||||
}
|
||||
|
||||
HypervisorType rootDiskHyperType = vm.getHypervisorType();
|
||||
HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
|
||||
@ -2092,6 +2106,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
StoragePoolVO destPrimaryStorage = null;
|
||||
if (existingVolumeOfVm != null && !existingVolumeOfVm.getState().equals(Volume.State.Allocated)) {
|
||||
destPrimaryStorage = _storagePoolDao.findById(existingVolumeOfVm.getPoolId());
|
||||
if (s_logger.isTraceEnabled() && destPrimaryStorage != null) {
|
||||
s_logger.trace(String.format("decided on target storage: %s/%s", destPrimaryStorage.getName(), destPrimaryStorage.getUuid()));
|
||||
}
|
||||
}
|
||||
|
||||
boolean volumeOnSecondary = volumeToAttach.getState() == Volume.State.Uploaded;
|
||||
@ -2111,6 +2128,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
// reload the volume from db
|
||||
newVolumeOnPrimaryStorage = volFactory.getVolume(newVolumeOnPrimaryStorage.getId());
|
||||
boolean moveVolumeNeeded = needMoveVolume(existingVolumeOfVm, newVolumeOnPrimaryStorage);
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace(String.format("is this a new volume: %s == %s ?", volumeToAttach, newVolumeOnPrimaryStorage));
|
||||
s_logger.trace(String.format("is it needed to move the volume: %b?", moveVolumeNeeded));
|
||||
}
|
||||
|
||||
if (moveVolumeNeeded) {
|
||||
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)newVolumeOnPrimaryStorage.getDataStore();
|
||||
@ -2146,24 +2167,184 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId) {
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
|
||||
// Check that the volume ID is valid
|
||||
VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
|
||||
// Check that the volume is a data volume
|
||||
if (volumeToAttach == null || !(volumeToAttach.getVolumeType() == Volume.Type.DATADISK || volumeToAttach.getVolumeType() == Volume.Type.ROOT)) {
|
||||
throw new InvalidParameterValueException("Please specify a volume with the valid type: " + Volume.Type.ROOT.toString() + " or " + Volume.Type.DATADISK.toString());
|
||||
VolumeInfo volumeToAttach = getAndCheckVolumeInfo(volumeId);
|
||||
|
||||
UserVmVO vm = getAndCheckUserVmVO(vmId, volumeToAttach);
|
||||
|
||||
checkDeviceId(deviceId, volumeToAttach, vm);
|
||||
|
||||
checkNumberOfAttachedVolumes(deviceId, vm);
|
||||
|
||||
excludeLocalStorageIfNeeded(volumeToAttach);
|
||||
|
||||
checkForDevicesInCopies(vmId, vm);
|
||||
|
||||
checkRightsToAttach(caller, volumeToAttach, vm);
|
||||
|
||||
HypervisorType rootDiskHyperType = vm.getHypervisorType();
|
||||
HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
|
||||
|
||||
StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
|
||||
if (s_logger.isTraceEnabled() && volumeToAttachStoragePool != null) {
|
||||
s_logger.trace(String.format("volume to attach (%s/%s) has a primary storage assigned to begin with (%s/%s)",
|
||||
volumeToAttach.getName(), volumeToAttach.getUuid(), volumeToAttachStoragePool.getName(), volumeToAttachStoragePool.getUuid()));
|
||||
}
|
||||
|
||||
// Check that the volume is not currently attached to any VM
|
||||
if (volumeToAttach.getInstanceId() != null) {
|
||||
throw new InvalidParameterValueException("Please specify a volume that is not attached to any VM.");
|
||||
checkForMatchingHypervisorTypesIf(volumeToAttachStoragePool != null && !volumeToAttachStoragePool.isManaged(), rootDiskHyperType, volumeToAttachHyperType);
|
||||
|
||||
AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
|
||||
|
||||
AsyncJob job = asyncExecutionContext.getJob();
|
||||
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info(String.format("Trying to attach volume [%s/%s] to VM instance [%s/%s], update async job-%s progress status",
|
||||
volumeToAttach.getName(),
|
||||
volumeToAttach.getUuid(),
|
||||
vm.getName(),
|
||||
vm.getUuid(),
|
||||
job.getId()));
|
||||
}
|
||||
|
||||
// Check that the volume is not destroyed
|
||||
if (volumeToAttach.getState() == Volume.State.Destroy) {
|
||||
throw new InvalidParameterValueException("Please specify a volume that is not destroyed.");
|
||||
_jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
|
||||
|
||||
if (asyncExecutionContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
|
||||
return safelyOrchestrateAttachVolume(vmId, volumeId, deviceId);
|
||||
} else {
|
||||
return getVolumeAttachJobResult(vmId, volumeId, deviceId);
|
||||
}
|
||||
}
|
||||
|
||||
@Nullable private Volume getVolumeAttachJobResult(Long vmId, Long volumeId, Long deviceId) {
|
||||
Outcome<Volume> outcome = attachVolumeToVmThroughJobQueue(vmId, volumeId, deviceId);
|
||||
|
||||
Volume vol = null;
|
||||
try {
|
||||
outcome.get();
|
||||
} catch (InterruptedException | ExecutionException e) {
|
||||
throw new CloudRuntimeException(String.format("Could not get attach volume job result for VM [%s], volume[%s] and device [%s], due to [%s].", vmId, volumeId, deviceId, e.getMessage()), e);
|
||||
}
|
||||
|
||||
// Check that the virtual machine ID is valid and it's a user vm
|
||||
Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
|
||||
if (jobResult != null) {
|
||||
if (jobResult instanceof ConcurrentOperationException) {
|
||||
throw (ConcurrentOperationException)jobResult;
|
||||
} else if (jobResult instanceof InvalidParameterValueException) {
|
||||
throw (InvalidParameterValueException)jobResult;
|
||||
} else if (jobResult instanceof RuntimeException) {
|
||||
throw (RuntimeException)jobResult;
|
||||
} else if (jobResult instanceof Throwable) {
|
||||
throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
|
||||
} else if (jobResult instanceof Long) {
|
||||
vol = _volsDao.findById((Long)jobResult);
|
||||
}
|
||||
}
|
||||
return vol;
|
||||
}
|
||||
|
||||
private Volume safelyOrchestrateAttachVolume(Long vmId, Long volumeId, Long deviceId) {
|
||||
// avoid re-entrance
|
||||
|
||||
VmWorkJobVO placeHolder = null;
|
||||
placeHolder = createPlaceHolderWork(vmId);
|
||||
try {
|
||||
return orchestrateAttachVolumeToVM(vmId, volumeId, deviceId);
|
||||
} finally {
|
||||
_workJobDao.expunge(placeHolder.getId());
|
||||
}
|
||||
}
|
||||
|
||||
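safelyOrchestrateAttachVolume wraps the re-entrance guard that previously sat inline in attachVolumeToVM: create a placeholder work job, run the orchestration, and always expunge the placeholder afterwards. A small sketch of that try/finally resource pattern with the job machinery stubbed out (all names here are illustrative, not the project's API):

```java
final class PlaceholderJobDemo {
    interface WorkJobStore {
        long createPlaceholder(long vmId);
        void expunge(long jobId);
    }

    // Run the orchestration step under a placeholder job; the finally block
    // guarantees the placeholder is removed even if orchestration throws.
    static <T> T runWithPlaceholder(WorkJobStore store, long vmId, java.util.function.Supplier<T> orchestration) {
        long placeholderId = store.createPlaceholder(vmId);
        try {
            return orchestration.get();
        } finally {
            store.expunge(placeholderId);
        }
    }
}
```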
/**
|
||||
* managed storage can be used for different types of hypervisors
|
||||
* only perform this check if the volume's storage pool is not null and not managed
|
||||
*/
|
||||
private void checkForMatchingHypervisorTypesIf(boolean checkNeeded, HypervisorType rootDiskHyperType, HypervisorType volumeToAttachHyperType) {
|
||||
if (checkNeeded && volumeToAttachHyperType != HypervisorType.None && rootDiskHyperType != volumeToAttachHyperType) {
|
||||
throw new InvalidParameterValueException("Can't attach a volume created by: " + volumeToAttachHyperType + " to a " + rootDiskHyperType + " vm");
|
||||
}
|
||||
}
|
||||
|
||||
private void checkRightsToAttach(Account caller, VolumeInfo volumeToAttach, UserVmVO vm) {
|
||||
_accountMgr.checkAccess(caller, null, true, volumeToAttach, vm);
|
||||
|
||||
Account owner = _accountDao.findById(volumeToAttach.getAccountId());
|
||||
|
||||
if (!Arrays.asList(Volume.State.Allocated, Volume.State.Ready).contains(volumeToAttach.getState())) {
|
||||
try {
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, volumeToAttach.getSize());
|
||||
} catch (ResourceAllocationException e) {
|
||||
s_logger.error("primary storage resource limit check failed", e);
|
||||
throw new InvalidParameterValueException(e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void checkForDevicesInCopies(Long vmId, UserVmVO vm) {
|
||||
// if target VM has associated VM snapshots
|
||||
List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vmId);
|
||||
if (vmSnapshots.size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format("Unable to attach volume to VM %s/%s, please specify a VM that does not have VM snapshots", vm.getName(), vm.getUuid()));
|
||||
}
|
||||
|
||||
// if target VM has backups
|
||||
if (vm.getBackupOfferingId() != null || vm.getBackupVolumeList().size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format("Unable to attach volume to VM %s/%s, please specify a VM that does not have any backups", vm.getName(), vm.getUuid()));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If local storage is disabled then attaching a volume with a local diskoffering is not allowed
|
||||
*/
|
||||
private void excludeLocalStorageIfNeeded(VolumeInfo volumeToAttach) {
|
||||
DataCenterVO dataCenter = _dcDao.findById(volumeToAttach.getDataCenterId());
|
||||
if (!dataCenter.isLocalStorageEnabled()) {
|
||||
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId());
|
||||
if (diskOffering.isUseLocalStorage()) {
|
||||
throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that the number of data volumes attached to VM is less than the number that are supported by the hypervisor
|
||||
*/
|
||||
private void checkNumberOfAttachedVolumes(Long deviceId, UserVmVO vm) {
|
||||
if (deviceId == null || deviceId.longValue() != 0) {
|
||||
List<VolumeVO> existingDataVolumes = _volsDao.findByInstanceAndType(vm.getId(), Volume.Type.DATADISK);
|
||||
int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm);
|
||||
if (existingDataVolumes.size() >= maxAttachableDataVolumesSupported) {
|
||||
throw new InvalidParameterValueException(
|
||||
"The specified VM already has the maximum number of data disks (" + maxAttachableDataVolumesSupported + ") attached. Please specify another VM.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* validate ROOT volume type;
|
||||
* 1. vm shouldn't have any volume with deviceId 0
|
||||
* 2. volume can't be in Uploaded state
|
||||
*
|
||||
* @param deviceId requested device number to attach as
|
||||
* @param volumeToAttach
|
||||
* @param vm
|
||||
*/
|
||||
private void checkDeviceId(Long deviceId, VolumeInfo volumeToAttach, UserVmVO vm) {
|
||||
if (deviceId != null && deviceId.longValue() == 0) {
|
||||
validateRootVolumeDetachAttach(_volsDao.findById(volumeToAttach.getId()), vm);
|
||||
if (!_volsDao.findByInstanceAndDeviceId(vm.getId(), 0).isEmpty()) {
|
||||
throw new InvalidParameterValueException("Vm already has root volume attached to it");
|
||||
}
|
||||
if (volumeToAttach.getState() == Volume.State.Uploaded) {
|
||||
throw new InvalidParameterValueException("No support for Root volume attach in state " + Volume.State.Uploaded);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that the virtual machine ID is valid and it's a user vm
|
||||
*
|
||||
* @return the user vm vo object corresponding to the vmId to attach to
|
||||
*/
|
||||
@NotNull private UserVmVO getAndCheckUserVmVO(Long vmId, VolumeInfo volumeToAttach) {
|
||||
UserVmVO vm = _userVmDao.findById(vmId);
|
||||
if (vm == null || vm.getType() != VirtualMachine.Type.User) {
|
||||
throw new InvalidParameterValueException("Please specify a valid User VM.");
|
||||
@ -2178,138 +2359,36 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
if (vm.getDataCenterId() != volumeToAttach.getDataCenterId()) {
|
||||
throw new InvalidParameterValueException("Please specify a VM that is in the same zone as the volume.");
|
||||
}
|
||||
return vm;
|
||||
}
|
||||
|
||||
// Check that the device ID is valid
|
||||
if (deviceId != null) {
|
||||
// validate ROOT volume type
|
||||
if (deviceId.longValue() == 0) {
|
||||
validateRootVolumeDetachAttach(_volsDao.findById(volumeToAttach.getId()), vm);
|
||||
// vm shouldn't have any volume with deviceId 0
|
||||
if (!_volsDao.findByInstanceAndDeviceId(vm.getId(), 0).isEmpty()) {
|
||||
throw new InvalidParameterValueException("Vm already has root volume attached to it");
|
||||
}
|
||||
// volume can't be in Uploaded state
|
||||
if (volumeToAttach.getState() == Volume.State.Uploaded) {
|
||||
throw new InvalidParameterValueException("No support for Root volume attach in state " + Volume.State.Uploaded);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Check that the volume ID is valid
|
||||
* Check that the volume is a data volume
|
||||
* Check that the volume is not currently attached to any VM
|
||||
* Check that the volume is not destroyed
|
||||
*
|
||||
* @param volumeId the id of the volume to attach
|
||||
* @return the volume info object representing the volume to attach
|
||||
*/
|
||||
@NotNull private VolumeInfo getAndCheckVolumeInfo(Long volumeId) {
|
||||
VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
|
||||
if (volumeToAttach == null || !(volumeToAttach.getVolumeType() == Volume.Type.DATADISK || volumeToAttach.getVolumeType() == Volume.Type.ROOT)) {
|
||||
throw new InvalidParameterValueException("Please specify a volume with the valid type: " + Volume.Type.ROOT.toString() + " or " + Volume.Type.DATADISK.toString());
|
||||
}
|
||||
|
||||
// Check that the number of data volumes attached to VM is less than
|
||||
// that supported by hypervisor
|
||||
if (deviceId == null || deviceId.longValue() != 0) {
|
||||
List<VolumeVO> existingDataVolumes = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
|
||||
int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm);
|
||||
if (existingDataVolumes.size() >= maxAttachableDataVolumesSupported) {
|
||||
throw new InvalidParameterValueException(
|
||||
"The specified VM already has the maximum number of data disks (" + maxAttachableDataVolumesSupported + ") attached. Please specify another VM.");
|
||||
}
|
||||
if (volumeToAttach.getInstanceId() != null) {
|
||||
throw new InvalidParameterValueException("Please specify a volume that is not attached to any VM.");
|
||||
}
|
||||
|
||||
// If local storage is disabled then attaching a volume with local disk
|
||||
// offering not allowed
|
||||
DataCenterVO dataCenter = _dcDao.findById(volumeToAttach.getDataCenterId());
|
||||
if (!dataCenter.isLocalStorageEnabled()) {
|
||||
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId());
|
||||
if (diskOffering.isUseLocalStorage()) {
|
||||
throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it");
|
||||
}
|
||||
if (volumeToAttach.getState() == Volume.State.Destroy) {
|
||||
throw new InvalidParameterValueException("Please specify a volume that is not destroyed.");
|
||||
}
|
||||
|
||||
// if target VM has associated VM snapshots
|
||||
List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vmId);
|
||||
if (vmSnapshots.size() > 0) {
|
||||
throw new InvalidParameterValueException("Unable to attach volume, please specify a VM that does not have VM snapshots");
|
||||
}
|
||||
|
||||
// if target VM has backups
|
||||
if (vm.getBackupOfferingId() != null || vm.getBackupVolumeList().size() > 0) {
|
||||
throw new InvalidParameterValueException("Unable to attach volume, please specify a VM that does not have any backups");
|
||||
}
|
||||
|
||||
// permission check
|
||||
_accountMgr.checkAccess(caller, null, true, volumeToAttach, vm);
|
||||
|
||||
if (!(Volume.State.Allocated.equals(volumeToAttach.getState()) || Volume.State.Ready.equals(volumeToAttach.getState()) || Volume.State.Uploaded.equals(volumeToAttach.getState()))) {
|
||||
if (!validAttachStates.contains(volumeToAttach.getState())) {
|
||||
throw new InvalidParameterValueException("Volume state must be in Allocated, Ready or in Uploaded state");
|
||||
}
|
||||
|
||||
Account owner = _accountDao.findById(volumeToAttach.getAccountId());
|
||||
|
||||
if (!(volumeToAttach.getState() == Volume.State.Allocated || volumeToAttach.getState() == Volume.State.Ready)) {
|
||||
try {
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, volumeToAttach.getSize());
|
||||
} catch (ResourceAllocationException e) {
|
||||
s_logger.error("primary storage resource limit check failed", e);
|
||||
throw new InvalidParameterValueException(e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
HypervisorType rootDiskHyperType = vm.getHypervisorType();
|
||||
HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
|
||||
|
||||
StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
|
||||
|
||||
// managed storage can be used for different types of hypervisors
|
||||
// only perform this check if the volume's storage pool is not null and not managed
|
||||
if (volumeToAttachStoragePool != null && !volumeToAttachStoragePool.isManaged()) {
|
||||
if (volumeToAttachHyperType != HypervisorType.None && rootDiskHyperType != volumeToAttachHyperType) {
|
||||
throw new InvalidParameterValueException("Can't attach a volume created by: " + volumeToAttachHyperType + " to a " + rootDiskHyperType + " vm");
|
||||
}
|
||||
}
|
||||
|
||||
AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
|
||||
|
||||
if (asyncExecutionContext != null) {
|
||||
AsyncJob job = asyncExecutionContext.getJob();
|
||||
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("Trying to attaching volume " + volumeId + " to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status");
|
||||
}
|
||||
|
||||
_jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
|
||||
}
|
||||
|
||||
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
|
||||
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
|
||||
// avoid re-entrance
|
||||
|
||||
VmWorkJobVO placeHolder = null;
|
||||
placeHolder = createPlaceHolderWork(vmId);
|
||||
try {
|
||||
return orchestrateAttachVolumeToVM(vmId, volumeId, deviceId);
|
||||
} finally {
|
||||
_workJobDao.expunge(placeHolder.getId());
|
||||
}
|
||||
|
||||
} else {
|
||||
Outcome<Volume> outcome = attachVolumeToVmThroughJobQueue(vmId, volumeId, deviceId);
|
||||
|
||||
Volume vol = null;
|
||||
try {
|
||||
outcome.get();
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException("Operation is interrupted", e);
|
||||
} catch (java.util.concurrent.ExecutionException e) {
|
||||
throw new RuntimeException("Execution excetion", e);
|
||||
}
|
||||
|
||||
Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
|
||||
if (jobResult != null) {
|
||||
if (jobResult instanceof ConcurrentOperationException) {
|
||||
throw (ConcurrentOperationException)jobResult;
|
||||
} else if (jobResult instanceof InvalidParameterValueException) {
|
||||
throw (InvalidParameterValueException)jobResult;
|
||||
} else if (jobResult instanceof RuntimeException) {
|
||||
throw (RuntimeException)jobResult;
|
||||
} else if (jobResult instanceof Throwable) {
|
||||
throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
|
||||
} else if (jobResult instanceof Long) {
|
||||
vol = _volsDao.findById((Long)jobResult);
|
||||
}
|
||||
}
|
||||
return vol;
|
||||
}
|
||||
return volumeToAttach;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -2497,7 +2576,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
AsyncJob job = asyncExecutionContext.getJob();
|
||||
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("Trying to attaching volume " + volumeId + "to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status");
|
||||
s_logger.info(String.format("Trying to attach volume %s to VM instance %s, update async job-%s progress status",
|
||||
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "name", "uuid"),
|
||||
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "name", "uuid"),
|
||||
job.getId()));
|
||||
}
|
||||
|
||||
_jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
|
||||
@ -3757,6 +3839,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
boolean sendCommand = vm.getState() == State.Running;
|
||||
AttachAnswer answer = null;
|
||||
StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
|
||||
if (s_logger.isTraceEnabled() && volumeToAttachStoragePool != null) {
|
||||
s_logger.trace(String.format("storage is gotten from volume to attach: %s/%s",volumeToAttachStoragePool.getName(),volumeToAttachStoragePool.getUuid()));
|
||||
}
|
||||
HostVO host = getHostForVmVolumeAttach(vm, volumeToAttachStoragePool);
|
||||
Long hostId = host == null ? null : host.getId();
|
||||
if (host != null && host.getHypervisorType() == HypervisorType.VMware) {
|
||||
|
||||
@ -276,8 +276,6 @@ public class VolumeApiServiceImplTest {
|
||||
|
||||
// managed root volume
|
||||
VolumeInfo managedVolume = Mockito.mock(VolumeInfo.class);
|
||||
when(managedVolume.getId()).thenReturn(7L);
|
||||
when(managedVolume.getDataCenterId()).thenReturn(1L);
|
||||
when(managedVolume.getVolumeType()).thenReturn(Volume.Type.ROOT);
|
||||
when(managedVolume.getInstanceId()).thenReturn(null);
|
||||
lenient().when(managedVolume.getPoolId()).thenReturn(2L);
|
||||
@ -286,7 +284,6 @@ public class VolumeApiServiceImplTest {
|
||||
VolumeVO managedVolume1 = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
|
||||
managedVolume1.setPoolId(2L);
|
||||
managedVolume1.setDataCenterId(1L);
|
||||
when(volumeDaoMock.findById(7L)).thenReturn(managedVolume1);
|
||||
|
||||
// vm having root volume
|
||||
UserVmVO vmHavingRootVolume = new UserVmVO(4L, "vm", "vm", 1, HypervisorType.XenServer, 1L, false, false, 1L, 1L, 1, 1L, null, "vm");
|
||||
|
||||
@ -19,7 +19,7 @@ from nose.plugins.attrib import attr
|
||||
from component.test_vpc_vm_life_cycle import Services
|
||||
|
||||
|
||||
class TestVMLifeCycleSharedNwVPC(cloudstackTesTODOtCase):
|
||||
class TestVMLifeCycleSharedNwVPC(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
|
||||
@ -17,22 +17,19 @@
|
||||
""" BVT tests for Hosts Maintenance
|
||||
"""
|
||||
|
||||
# Import Local Modules
|
||||
from marvin.codes import FAILED
|
||||
from marvin.cloudstackTestCase import *
|
||||
from marvin.cloudstackAPI import *
|
||||
from marvin.lib.utils import *
|
||||
from marvin.lib.base import *
|
||||
from marvin.lib.common import *
|
||||
from nose.plugins.attrib import attr
|
||||
|
||||
from time import sleep
|
||||
|
||||
_multiprocess_shared_ = False
|
||||
|
||||
|
||||
class TestHostHA(cloudstackTestCase):
|
||||
|
||||
hostCountMsg = "Host HA can be tested with at least two hosts, only %s found"
|
||||
|
||||
def setUp(self):
|
||||
self.logger = logging.getLogger('TestHM')
|
||||
self.stream_handler = logging.StreamHandler()
|
||||
@ -86,17 +83,8 @@ class TestHostHA(cloudstackTestCase):
|
||||
"timeout": 10,
|
||||
}
|
||||
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
# Clean up, terminate the created templates
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
|
||||
return
|
||||
|
||||
super(TestHostHA, self).tearDown()
|
||||
|
||||
def createVMs(self, hostId, number, local):
|
||||
|
||||
@ -319,11 +307,9 @@ class TestHostHA(cloudstackTestCase):
|
||||
for host in listHost:
|
||||
self.logger.debug('Hypervisor = {}'.format(host.id))
|
||||
|
||||
|
||||
if len(listHost) != 2:
|
||||
self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost))
|
||||
raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost))
|
||||
|
||||
if len(listHost) < 2:
|
||||
self.logger.debug(self.hostCountMsg % len(listHost))
|
||||
raise unittest.SkipTest(self.hostCountMsg % len(listHost))
|
||||
|
||||
no_of_vms = self.noOfVMsOnHost(listHost[0].id)
|
||||
|
||||
@ -331,7 +317,6 @@ class TestHostHA(cloudstackTestCase):
|
||||
|
||||
self.logger.debug("Number of VMS on hosts = %s" % no_of_vms)
|
||||
|
||||
|
||||
if no_of_vms < 5:
|
||||
self.logger.debug("test_01: Create VMs as there are not enough vms to check host ha")
|
||||
no_vm_req = 5 - no_of_vms
|
||||
@ -396,10 +381,9 @@ class TestHostHA(cloudstackTestCase):
|
||||
for host in listHost:
|
||||
self.logger.debug('Hypervisor = {}'.format(host.id))
|
||||
|
||||
|
||||
if len(listHost) != 2:
|
||||
self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost))
|
||||
raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost))
|
||||
if len(listHost) < 2:
|
||||
self.logger.debug(self.hostCountMsg % len(listHost))
|
||||
raise unittest.SkipTest(self.hostCountMsg % len(listHost))
|
||||
|
||||
no_of_vms = self.noOfVMsOnHost(listHost[0].id)
|
||||
|
||||
@ -473,10 +457,9 @@ class TestHostHA(cloudstackTestCase):
|
||||
for host in listHost:
|
||||
self.logger.debug('Hypervisor = {}'.format(host.id))
|
||||
|
||||
|
||||
if len(listHost) != 2:
|
||||
self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost))
|
||||
raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost))
|
||||
if len(listHost) < 2:
|
||||
self.logger.debug(self.hostCountMsg % len(listHost))
|
||||
raise unittest.SkipTest(self.hostCountMsg % len(listHost))
|
||||
|
||||
no_of_vms = self.noOfVMsOnHost(listHost[0].id)
|
||||
|
||||
@ -548,10 +531,9 @@ class TestHostHA(cloudstackTestCase):
|
||||
for host in listHost:
|
||||
self.logger.debug('Hypervisor = {}'.format(host.id))
|
||||
|
||||
|
||||
if len(listHost) != 2:
|
||||
self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost))
|
||||
raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost))
|
||||
if len(listHost) < 2:
|
||||
self.logger.debug(self.hostCountMsg % len(listHost))
|
||||
raise unittest.SkipTest(self.hostCountMsg % len(listHost))
|
||||
|
||||
no_of_vms = self.noOfVMsOnHost(listHost[0].id)
|
||||
|
||||
@ -559,7 +541,6 @@ class TestHostHA(cloudstackTestCase):
|
||||
|
||||
self.logger.debug("Number of VMS on hosts = %s" % no_of_vms)
|
||||
|
||||
|
||||
if no_of_vms < 5:
|
||||
self.logger.debug("test_01: Create VMs as there are not enough vms to check host ha")
|
||||
no_vm_req = 5 - no_of_vms
|
||||
|
||||
@ -773,7 +773,7 @@ class TestResizeVolume(cloudstackTestCase):
|
||||
return
|
||||
|
||||
@attr(tags=["advanced"], required_hardware="true")
|
||||
def test_5_vmdeployment_with_size(self):
|
||||
def test_05_vmdeployment_with_size(self):
|
||||
"""Test vm deployment with new rootdisk size parameter
|
||||
|
||||
# Validate the following
|
||||
@ -855,7 +855,7 @@ class TestResizeVolume(cloudstackTestCase):
|
||||
|
||||
|
||||
@attr(tags=["advanced"], required_hardware="true")
|
||||
def test_6_resized_rootvolume_with_lessvalue(self):
|
||||
def test_06_resized_rootvolume_with_lessvalue(self):
|
||||
"""Test resize root volume with less than original volume size
|
||||
|
||||
# Validate the following
|
||||
@ -939,7 +939,7 @@ class TestResizeVolume(cloudstackTestCase):
|
||||
|
||||
# @attr(tags=["advanced"], required_hrdware="true")
|
||||
@attr(tags=["TODO"], required_hrdware="true")
|
||||
def test_7_usage_events_after_rootvolume_resized_(self):
|
||||
def test_07_usage_events_after_rootvolume_resized_(self):
|
||||
"""Test check usage events after root volume resize
|
||||
|
||||
# Validate the following
|
||||
|
||||
@ -18,11 +18,11 @@
|
||||
#Import Local Modules
|
||||
from marvin.cloudstackAPI import *
|
||||
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||
import unittest
|
||||
from marvin.lib.utils import (cleanup_resources,
|
||||
validateList)
|
||||
from marvin.lib.utils import (validateList)
|
||||
from marvin.lib.base import (ServiceOffering,
|
||||
VirtualMachine,
|
||||
Host,
|
||||
StoragePool,
|
||||
Account,
|
||||
Volume,
|
||||
DiskOffering,
|
||||
@ -67,6 +67,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
cls.apiclient,
|
||||
cls.services["disk_offering"]
|
||||
)
|
||||
cls._cleanup.append(cls.disk_offering)
|
||||
|
||||
template = get_template(
|
||||
cls.apiclient,
|
||||
@ -87,10 +88,14 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
cls.services["account"],
|
||||
domainid=cls.domain.id
|
||||
)
|
||||
cls._cleanup.append(cls.account)
|
||||
|
||||
cls.service_offering = ServiceOffering.create(
|
||||
cls.apiclient,
|
||||
cls.services["service_offering"]
|
||||
)
|
||||
cls._cleanup.append(cls.service_offering)
|
||||
|
||||
cls.virtual_machine = VirtualMachine.create(
|
||||
cls.apiclient,
|
||||
cls.services,
|
||||
@ -99,6 +104,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
serviceofferingid=cls.service_offering.id,
|
||||
mode=cls.services["mode"]
|
||||
)
|
||||
cls._cleanup.append(cls.virtual_machine)
|
||||
|
||||
#Create volumes (data disks)
|
||||
cls.volume1 = Volume.create(
|
||||
@ -107,6 +113,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
account=cls.account.name,
|
||||
domainid=cls.account.domainid
|
||||
)
|
||||
cls._cleanup.append(cls.volume1)
|
||||
|
||||
cls.volume2 = Volume.create(
|
||||
cls.apiclient,
|
||||
@ -114,6 +121,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
account=cls.account.name,
|
||||
domainid=cls.account.domainid
|
||||
)
|
||||
cls._cleanup.append(cls.volume2)
|
||||
|
||||
cls.volume3 = Volume.create(
|
||||
cls.apiclient,
|
||||
@ -121,6 +129,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
account=cls.account.name,
|
||||
domainid=cls.account.domainid
|
||||
)
|
||||
cls._cleanup.append(cls.volume3)
|
||||
|
||||
cls.volume4 = Volume.create(
|
||||
cls.apiclient,
|
||||
@ -128,18 +137,27 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
account=cls.account.name,
|
||||
domainid=cls.account.domainid
|
||||
)
|
||||
cls._cleanup = [
|
||||
cls.service_offering,
|
||||
cls.disk_offering,
|
||||
cls.account
|
||||
]
|
||||
cls._cleanup.append(cls.volume4)
|
||||
|
||||
cls.volume5 = Volume.create(
|
||||
cls.apiclient,
|
||||
cls.services,
|
||||
account=cls.account.name,
|
||||
domainid=cls.account.domainid
|
||||
)
|
||||
cls._cleanup.append(cls.volume5)
|
||||
|
||||
cls.volume6 = Volume.create(
|
||||
cls.apiclient,
|
||||
cls.services,
|
||||
account=cls.account.name,
|
||||
domainid=cls.account.domainid
|
||||
)
|
||||
cls._cleanup.append(cls.volume6)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
cleanup_resources(cls.apiclient, cls._cleanup)
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
super(TestMultipleVolumeAttach, cls).tearDownClass()
|
||||
|
||||
def setUp(self):
|
||||
self.apiClient = self.testClient.getApiClient()
|
||||
@ -151,20 +169,54 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
available")
|
||||
|
||||
def tearDown(self):
|
||||
cleanup_resources(self.apiClient, self.cleanup)
|
||||
return
|
||||
|
||||
self.detach_volume(self.volume1)
|
||||
self.detach_volume(self.volume2)
|
||||
self.detach_volume(self.volume3)
|
||||
self.detach_volume(self.volume4)
|
||||
self.detach_volume(self.volume5)
|
||||
self.detach_volume(self.volume6)
|
||||
|
||||
super(TestMultipleVolumeAttach, self).tearDown()
|
||||
|
||||
# Method to attach volume but will return immediately as an asynchronous task does.
|
||||
def attach_volume(self, apiclient,virtualmachineid, volume):
|
||||
def attach_volume(self, virtualmachineid, volume):
|
||||
"""Attach volume to instance"""
|
||||
cmd = attachVolume.attachVolumeCmd()
|
||||
cmd.isAsync = "false"
|
||||
cmd.id = volume.id
|
||||
cmd.virtualmachineid = virtualmachineid
|
||||
return apiclient.attachVolume(cmd)
|
||||
return self.apiClient.attachVolume(cmd)
|
||||
|
||||
# Method to detach a volume.
|
||||
def detach_volume(self, volume):
|
||||
"""Detach volume from instance"""
|
||||
cmd = detachVolume.detachVolumeCmd()
|
||||
cmd.id = volume.id
|
||||
try:
|
||||
self.apiClient.detachVolume(cmd)
|
||||
except Exception as e:
|
||||
self.debug("======exception e %s " % e)
|
||||
if "The specified volume is not attached to a VM." not in str(e):
|
||||
raise e
|
||||
|
||||
# list primary storage pools
|
||||
def check_storage_pools(self, virtualmachineid):
|
||||
"""
|
||||
list storage pools available to the VM
|
||||
"""
|
||||
vm = VirtualMachine.list(self.apiClient, id=virtualmachineid)[0]
|
||||
hostid = vm.histid
|
||||
host = Host.list(self.apiClient, id=hostid)[0]
|
||||
clusterid = host.clusterid
|
||||
storage_pools = StoragePool.list(self.apiClient, clusterid=clusterid)
|
||||
if len(storage_pools) < 2:
|
||||
self.skipTest("at least two accesible primary storage pools needed for the vm to perform this test")
|
||||
return storage_pools
|
||||
|
||||
|
||||
# Method to check the volume attach async jobs' status
|
||||
def query_async_job(self, apiclient, jobid):
|
||||
def query_async_job(self, jobid):
|
||||
"""Query the status for Async Job"""
|
||||
try:
|
||||
asyncTimeout = 3600
|
||||
@ -173,7 +225,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
timeout = asyncTimeout
|
||||
async_response = FAILED
|
||||
while timeout > 0:
|
||||
async_response = apiclient.queryAsyncJobResult(cmd)
|
||||
async_response = self.apiClient.queryAsyncJobResult(cmd)
|
||||
if async_response != FAILED:
|
||||
job_status = async_response.jobstatus
|
||||
if job_status in [JOB_CANCELLED,
|
||||
@ -193,7 +245,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
str(e))
|
||||
return FAILED
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "basic"], required_hardware="true")
|
||||
@attr(tags = ["advanced", "advancedns", "basic", "blob"], required_hardware="true")
|
||||
def test_attach_multiple_volumes(self):
|
||||
"""Attach multiple Volumes simultaneously to a Running VM
|
||||
"""
|
||||
@ -205,33 +257,33 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
self.volume1.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
vol1_jobId = self.attach_volume(self.apiClient, self.virtual_machine.id,self.volume1)
|
||||
vol1_jobId = self.attach_volume(self.virtual_machine.id,self.volume1)
|
||||
|
||||
self.debug(
|
||||
"Attaching volume (ID: %s) to VM (ID: %s)" % (
|
||||
self.volume2.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
vol2_jobId = self.attach_volume(self.apiClient,self.virtual_machine.id, self.volume2)
|
||||
vol2_jobId = self.attach_volume(self.virtual_machine.id, self.volume2)
|
||||
|
||||
self.debug(
|
||||
"Attaching volume (ID: %s) to VM (ID: %s)" % (
|
||||
self.volume3.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
vol3_jobId = self.attach_volume(self.apiClient,self.virtual_machine.id, self.volume3)
|
||||
vol3_jobId = self.attach_volume(self.virtual_machine.id, self.volume3)
|
||||
|
||||
self.debug(
|
||||
"Attaching volume (ID: %s) to VM (ID: %s)" % (
|
||||
self.volume4.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
vol4_jobId = self.attach_volume(self.apiClient,self.virtual_machine.id, self.volume4)
|
||||
vol4_jobId = self.attach_volume(self.virtual_machine.id, self.volume4)
|
||||
|
||||
self.query_async_job(self.apiClient,vol1_jobId.jobid)
|
||||
self.query_async_job(self.apiClient,vol2_jobId.jobid)
|
||||
self.query_async_job(self.apiClient,vol3_jobId.jobid)
|
||||
self.query_async_job(self.apiClient,vol4_jobId.jobid)
|
||||
self.query_async_job(vol1_jobId.jobid)
|
||||
self.query_async_job(vol2_jobId.jobid)
|
||||
self.query_async_job(vol3_jobId.jobid)
|
||||
self.query_async_job(vol4_jobId.jobid)
|
||||
|
||||
# List all the volumes attached to the instance. Includes even the Root disk.
|
||||
list_volume_response = Volume.list(
|
||||
@ -241,6 +293,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
account=self.account.name,
|
||||
domainid=self.account.domainid
|
||||
)
|
||||
self.debug("====== test paralalisation : %s ===" % str(list_volume_response).replace("}, {", "},\n {"))
|
||||
self.assertEqual(
|
||||
validateList(list_volume_response)[0],
|
||||
PASS,
|
||||
@ -254,3 +307,57 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
|
||||
)
|
||||
|
||||
return
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "basic", "bla"], required_hardware="true")
|
||||
def test_attach_and_distribute_multiple_volumes(self):
|
||||
"""
|
||||
Test volume distribution over storages
|
||||
|
||||
Given multiple primary storage pools
|
||||
Attach multiple Volumes to a VM
|
||||
check to see if these do not all end up on the same storage pool
|
||||
"""
|
||||
storage_pools = self.check_storage_pools(self.virtual_machine.id)
|
||||
storage_usage={}
|
||||
for storage_pool in storage_pools:
|
||||
storage_usage[storage_pool.name] = 0
|
||||
self.debug("====== test pools for usage : %s ===" % str(storage_usage).replace("}, {", "},\n {"))
|
||||
|
||||
vol1_jobId = self.attach_volume(self.virtual_machine.id,self.volume1)
|
||||
vol2_jobId = self.attach_volume(self.virtual_machine.id,self.volume2)
|
||||
vol3_jobId = self.attach_volume(self.virtual_machine.id,self.volume3)
|
||||
vol4_jobId = self.attach_volume(self.virtual_machine.id,self.volume4)
|
||||
vol5_jobId = self.attach_volume(self.virtual_machine.id,self.volume5)
|
||||
vol6_jobId = self.attach_volume(self.virtual_machine.id,self.volume6)
|
||||
|
||||
self.query_async_job(vol1_jobId.jobid)
|
||||
self.query_async_job(vol2_jobId.jobid)
|
||||
self.query_async_job(vol3_jobId.jobid)
|
||||
self.query_async_job(vol4_jobId.jobid)
|
||||
self.query_async_job(vol5_jobId.jobid)
|
||||
self.query_async_job(vol6_jobId.jobid)
|
||||
|
||||
volumes = Volume.list(self.apiClient,
|
||||
virtualmachineid=self.virtual_machine.id,
|
||||
type="DATADISK",
|
||||
account=self.account.name,
|
||||
domainid=self.account.domainid
|
||||
)
|
||||
self.debug("====== test distribution : %s ===" % str(volumes).replace("}, {", "},\n {"))
|
||||
for volume in volumes:
|
||||
self.debug("====== checking storage : %s ===" % str(volume))
|
||||
storage_usage[volume.storage] += 1
|
||||
|
||||
self.debug("====== test distribution : %s ===" % str(storage_usage).replace("}, {", "},\n {"))
|
||||
# TODO decide what an acceptable 'random' distribution is
|
||||
# all of the loads on storages should be less than the number of volumes for a distribution to exist (statistically this may fail about once every 6⁵ = 7776 runs)
|
||||
self.assertEqual(
|
||||
len(volumes),
|
||||
6,
|
||||
"All 6 data disks are not attached to VM Successfully"
|
||||
)
|
||||
for storage_use in storage_usage:
|
||||
self.assertTrue(
|
||||
storage_usage[storage_use] < 6,
|
||||
"not all volumes should be on one storage: %s" % storage_use)
|
||||
return
|
||||