mirror of
https://github.com/apache/cloudstack.git
synced 2025-10-26 08:42:29 +01:00
storage,plugins: delegate allow zone-wide volume migration check and access grant check to storage drivers (#8762)
* storage,plugins: delegate allow zone-wide volume migration check and access grant to storage drivers Following checks have been delegated to storage drivers, - For volumes on zone-wide storage, whether they need storage migration when VM is migrated - Whether volume requires grant access Apply fixes in resolving PrimaryDataStore * add tests Signed-off-by: Abhishek Kumar <abhishek.mrt22@gmail.com> * unused import Signed-off-by: Abhishek Kumar <abhishek.mrt22@gmail.com> * Update engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestratorTest.java --------- Signed-off-by: Abhishek Kumar <abhishek.mrt22@gmail.com>
This commit is contained in:
parent
9acba90423
commit
ffd59720dd
@ -142,4 +142,19 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
|
||||
boolean isStorageSupportHA(StoragePoolType type);
|
||||
|
||||
void detachVolumeFromAllStorageNodes(Volume volume);
|
||||
/**
|
||||
* Data store driver needs its grantAccess() method called for volumes in order for them to be used with a host.
|
||||
* @return true if we should call grantAccess() to use a volume
|
||||
*/
|
||||
default boolean volumesRequireGrantAccessWhenUsed() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zone-wide data store supports using a volume across clusters without the need for data motion
|
||||
* @return true if we don't need to data motion volumes across clusters for zone-wide use
|
||||
*/
|
||||
default boolean zoneWideVolumesAvailableWithoutClusterMotion() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1856,6 +1856,18 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
||||
return _volsDao.persist(volume);
|
||||
}
|
||||
|
||||
protected void grantVolumeAccessToHostIfNeeded(PrimaryDataStore volumeStore, long volumeId, Host host, String volToString) {
|
||||
PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver)volumeStore.getDriver();
|
||||
if (!driver.volumesRequireGrantAccessWhenUsed()) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
volService.grantAccess(volFactory.getVolume(volumeId), host, volumeStore);
|
||||
} catch (Exception e) {
|
||||
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException {
|
||||
if (dest == null) {
|
||||
@ -1873,18 +1885,18 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
||||
|
||||
List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
|
||||
Volume vol = null;
|
||||
StoragePool pool;
|
||||
PrimaryDataStore store;
|
||||
for (VolumeTask task : tasks) {
|
||||
if (task.type == VolumeTaskType.NOP) {
|
||||
vol = task.volume;
|
||||
|
||||
String volToString = getReflectOnlySelectedFields(vol);
|
||||
|
||||
pool = (StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
|
||||
store = (PrimaryDataStore)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
|
||||
|
||||
// For zone-wide managed storage, it is possible that the VM can be started in another
|
||||
// cluster. In that case, make sure that the volume is in the right access group.
|
||||
if (pool.isManaged()) {
|
||||
if (store.isManaged()) {
|
||||
Host lastHost = _hostDao.findById(vm.getVirtualMachine().getLastHostId());
|
||||
Host host = _hostDao.findById(vm.getVirtualMachine().getHostId());
|
||||
|
||||
@ -1893,37 +1905,27 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
||||
|
||||
if (lastClusterId != clusterId) {
|
||||
if (lastHost != null) {
|
||||
storageMgr.removeStoragePoolFromCluster(lastHost.getId(), vol.get_iScsiName(), pool);
|
||||
|
||||
DataStore storagePool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
|
||||
volService.revokeAccess(volFactory.getVolume(vol.getId()), lastHost, storagePool);
|
||||
storageMgr.removeStoragePoolFromCluster(lastHost.getId(), vol.get_iScsiName(), store);
|
||||
volService.revokeAccess(volFactory.getVolume(vol.getId()), lastHost, store);
|
||||
}
|
||||
|
||||
try {
|
||||
volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool);
|
||||
volService.grantAccess(volFactory.getVolume(vol.getId()), host, store);
|
||||
} catch (Exception e) {
|
||||
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host));
|
||||
}
|
||||
} else {
|
||||
// This might impact other managed storages, grant access for PowerFlex and Iscsi/Solidfire storage pool only
|
||||
if (pool.getPoolType() == Storage.StoragePoolType.PowerFlex || pool.getPoolType() == Storage.StoragePoolType.Iscsi) {
|
||||
try {
|
||||
volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool);
|
||||
} catch (Exception e) {
|
||||
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host));
|
||||
}
|
||||
}
|
||||
grantVolumeAccessToHostIfNeeded(store, vol.getId(), host, volToString);
|
||||
}
|
||||
} else {
|
||||
handleCheckAndRepairVolume(vol, vm.getVirtualMachine().getHostId());
|
||||
}
|
||||
} else if (task.type == VolumeTaskType.MIGRATE) {
|
||||
pool = (StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
|
||||
vol = migrateVolume(task.volume, pool);
|
||||
store = (PrimaryDataStore) dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
|
||||
vol = migrateVolume(task.volume, store);
|
||||
} else if (task.type == VolumeTaskType.RECREATE) {
|
||||
Pair<VolumeVO, DataStore> result = recreateVolume(task.volume, vm, dest);
|
||||
pool = (StoragePool)dataStoreMgr.getDataStore(result.second().getId(), DataStoreRole.Primary);
|
||||
store = (PrimaryDataStore) dataStoreMgr.getDataStore(result.second().getId(), DataStoreRole.Primary);
|
||||
vol = result.first();
|
||||
}
|
||||
|
||||
|
||||
@ -18,6 +18,13 @@ package org.apache.cloudstack.engine.orchestration;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
|
||||
import org.apache.commons.lang3.ObjectUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
@ -31,14 +38,22 @@ import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.exception.StorageAccessException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class VolumeOrchestratorTest {
|
||||
|
||||
@Mock
|
||||
protected ResourceLimitService resourceLimitMgr;
|
||||
@Mock
|
||||
protected VolumeService volumeService;
|
||||
@Mock
|
||||
protected VolumeDataFactory volumeDataFactory;
|
||||
|
||||
@Spy
|
||||
@InjectMocks
|
||||
@ -100,4 +115,44 @@ public class VolumeOrchestratorTest {
|
||||
public void testCheckAndUpdateVolumeAccountResourceCountLessSize() {
|
||||
runCheckAndUpdateVolumeAccountResourceCountTest(20L, 10L);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGrantVolumeAccessToHostIfNeededDriverNoNeed() {
|
||||
PrimaryDataStore store = Mockito.mock(PrimaryDataStore.class);
|
||||
PrimaryDataStoreDriver driver = Mockito.mock(PrimaryDataStoreDriver.class);
|
||||
Mockito.when(driver.volumesRequireGrantAccessWhenUsed()).thenReturn(false);
|
||||
Mockito.when(store.getDriver()).thenReturn(driver);
|
||||
volumeOrchestrator.grantVolumeAccessToHostIfNeeded(store, 1L,
|
||||
Mockito.mock(HostVO.class), "");
|
||||
Mockito.verify(volumeService, Mockito.never())
|
||||
.grantAccess(Mockito.any(DataObject.class), Mockito.any(Host.class), Mockito.any(DataStore.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGrantVolumeAccessToHostIfNeededDriverNeeds() {
|
||||
PrimaryDataStore store = Mockito.mock(PrimaryDataStore.class);
|
||||
PrimaryDataStoreDriver driver = Mockito.mock(PrimaryDataStoreDriver.class);
|
||||
Mockito.when(driver.volumesRequireGrantAccessWhenUsed()).thenReturn(true);
|
||||
Mockito.when(store.getDriver()).thenReturn(driver);
|
||||
Mockito.when(volumeDataFactory.getVolume(Mockito.anyLong())).thenReturn(Mockito.mock(VolumeInfo.class));
|
||||
Mockito.doReturn(true).when(volumeService)
|
||||
.grantAccess(Mockito.any(DataObject.class), Mockito.any(Host.class), Mockito.any(DataStore.class));
|
||||
volumeOrchestrator.grantVolumeAccessToHostIfNeeded(store, 1L,
|
||||
Mockito.mock(HostVO.class), "");
|
||||
Mockito.verify(volumeService, Mockito.times(1))
|
||||
.grantAccess(Mockito.any(DataObject.class), Mockito.any(Host.class), Mockito.any(DataStore.class));
|
||||
}
|
||||
|
||||
@Test(expected = StorageAccessException.class)
|
||||
public void testGrantVolumeAccessToHostIfNeededDriverNeedsButException() {
|
||||
PrimaryDataStore store = Mockito.mock(PrimaryDataStore.class);
|
||||
PrimaryDataStoreDriver driver = Mockito.mock(PrimaryDataStoreDriver.class);
|
||||
Mockito.when(driver.volumesRequireGrantAccessWhenUsed()).thenReturn(true);
|
||||
Mockito.when(store.getDriver()).thenReturn(driver);
|
||||
Mockito.when(volumeDataFactory.getVolume(Mockito.anyLong())).thenReturn(Mockito.mock(VolumeInfo.class));
|
||||
Mockito.doThrow(CloudRuntimeException.class).when(volumeService)
|
||||
.grantAccess(Mockito.any(DataObject.class), Mockito.any(Host.class), Mockito.any(DataStore.class));
|
||||
volumeOrchestrator.grantVolumeAccessToHostIfNeeded(store, 1L,
|
||||
Mockito.mock(HostVO.class), "");
|
||||
}
|
||||
}
|
||||
|
||||
@ -1890,4 +1890,9 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
||||
@Override
|
||||
public void detachVolumeFromAllStorageNodes(Volume volume) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean volumesRequireGrantAccessWhenUsed() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -268,4 +268,9 @@ public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
||||
@Override
|
||||
public void detachVolumeFromAllStorageNodes(Volume volume) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean volumesRequireGrantAccessWhenUsed() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1430,4 +1430,14 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
||||
@Override
|
||||
public void detachVolumeFromAllStorageNodes(Volume volume) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean volumesRequireGrantAccessWhenUsed() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean zoneWideVolumesAvailableWithoutClusterMotion() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1672,4 +1672,9 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
||||
@Override
|
||||
public void detachVolumeFromAllStorageNodes(Volume volume) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean volumesRequireGrantAccessWhenUsed() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -43,8 +43,6 @@ import javax.crypto.spec.SecretKeySpec;
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.network.dao.PublicIpQuarantineDao;
|
||||
import com.cloud.hypervisor.HypervisorGuru;
|
||||
import org.apache.cloudstack.acl.ControlledEntity;
|
||||
import org.apache.cloudstack.acl.SecurityChecker;
|
||||
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
|
||||
@ -225,8 +223,8 @@ import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStore
|
||||
import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.ListStorageProvidersCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.MigrateSecondaryStorageDataCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.MigrateResourcesToAnotherSecondaryStorageCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.MigrateSecondaryStorageDataCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.PreparePrimaryStorageForMaintenanceCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
|
||||
import org.apache.cloudstack.api.command.admin.storage.UpdateCloudToUseObjectStoreCmd;
|
||||
@ -606,6 +604,9 @@ import org.apache.cloudstack.config.Configuration;
|
||||
import org.apache.cloudstack.config.ConfigurationGroup;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
|
||||
import org.apache.cloudstack.framework.config.ConfigDepot;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
@ -717,6 +718,7 @@ import com.cloud.host.dao.HostTagsDao;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.hypervisor.HypervisorCapabilities;
|
||||
import com.cloud.hypervisor.HypervisorCapabilitiesVO;
|
||||
import com.cloud.hypervisor.HypervisorGuru;
|
||||
import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
|
||||
import com.cloud.hypervisor.kvm.dpdk.DpdkHelper;
|
||||
import com.cloud.info.ConsoleProxyInfo;
|
||||
@ -736,6 +738,7 @@ import com.cloud.network.dao.NetworkDao;
|
||||
import com.cloud.network.dao.NetworkDomainDao;
|
||||
import com.cloud.network.dao.NetworkDomainVO;
|
||||
import com.cloud.network.dao.NetworkVO;
|
||||
import com.cloud.network.dao.PublicIpQuarantineDao;
|
||||
import com.cloud.network.vpc.dao.VpcDao;
|
||||
import com.cloud.org.Cluster;
|
||||
import com.cloud.org.Grouping.AllocationState;
|
||||
@ -967,6 +970,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
@Inject
|
||||
private PrimaryDataStoreDao _primaryDataStoreDao;
|
||||
@Inject
|
||||
private DataStoreManager dataStoreManager;
|
||||
@Inject
|
||||
private VolumeDataStoreDao _volumeStoreDao;
|
||||
@Inject
|
||||
private TemplateDataStoreDao _vmTemplateStoreDao;
|
||||
@ -1414,6 +1419,20 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
return listHostsForMigrationOfVM(vm, startIndex, pageSize, keyword, Collections.emptyList());
|
||||
}
|
||||
|
||||
protected boolean zoneWideVolumeRequiresStorageMotion(PrimaryDataStore volumeDataStore,
|
||||
final Host sourceHost, final Host destinationHost) {
|
||||
if (volumeDataStore.isManaged() && sourceHost.getClusterId() != destinationHost.getClusterId()) {
|
||||
PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver)volumeDataStore.getDriver();
|
||||
// Depends on the storage driver. For some storages simply
|
||||
// changing volume access to host should work: grant access on destination
|
||||
// host and revoke access on source host. For others, we still have to perform a storage migration
|
||||
// because we need to create a new target volume and copy the contents of the
|
||||
// source volume into it before deleting the source volume.
|
||||
return !driver.zoneWideVolumesAvailableWithoutClusterMotion();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(final VirtualMachine vm, final Long startIndex, final Long pageSize,
|
||||
final String keyword, List<VirtualMachine> vmList) {
|
||||
@ -1482,8 +1501,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
filteredHosts = new ArrayList<>(allHosts);
|
||||
|
||||
for (final VolumeVO volume : volumes) {
|
||||
StoragePool storagePool = _poolDao.findById(volume.getPoolId());
|
||||
Long volClusterId = storagePool.getClusterId();
|
||||
PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStoreManager.getPrimaryDataStore(volume.getPoolId());
|
||||
Long volClusterId = primaryDataStore.getClusterId();
|
||||
|
||||
for (Iterator<HostVO> iterator = filteredHosts.iterator(); iterator.hasNext();) {
|
||||
final Host host = iterator.next();
|
||||
@ -1493,8 +1512,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
}
|
||||
|
||||
if (volClusterId != null) {
|
||||
if (storagePool.isLocal() || !host.getClusterId().equals(volClusterId) || usesLocal) {
|
||||
if (storagePool.isManaged()) {
|
||||
if (primaryDataStore.isLocal() || !host.getClusterId().equals(volClusterId) || usesLocal) {
|
||||
if (primaryDataStore.isManaged()) {
|
||||
// At the time being, we do not support storage migration of a volume from managed storage unless the managed storage
|
||||
// is at the zone level and the source and target storage pool is the same.
|
||||
// If the source and target storage pool is the same and it is managed, then we still have to perform a storage migration
|
||||
@ -1512,22 +1531,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (storagePool.isManaged()) {
|
||||
if (srcHost.getClusterId() != host.getClusterId()) {
|
||||
if (storagePool.getPoolType() == Storage.StoragePoolType.PowerFlex) {
|
||||
// No need of new volume creation for zone wide PowerFlex/ScaleIO pool
|
||||
// Simply, changing volume access to host should work: grant access on dest host and revoke access on source host
|
||||
continue;
|
||||
}
|
||||
// If the volume's storage pool is managed and at the zone level, then we still have to perform a storage migration
|
||||
// because we need to create a new target volume and copy the contents of the source volume into it before deleting
|
||||
// the source volume.
|
||||
if (zoneWideVolumeRequiresStorageMotion(primaryDataStore, srcHost, host)) {
|
||||
requiresStorageMotion.put(host, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (CollectionUtils.isEmpty(filteredHosts)) {
|
||||
return new Ternary<>(new Pair<>(allHosts, allHostsPair.second()), new ArrayList<>(), new HashMap<>());
|
||||
|
||||
@ -54,6 +54,8 @@ import org.apache.cloudstack.api.command.user.userdata.DeleteUserDataCmd;
|
||||
import org.apache.cloudstack.api.command.user.userdata.ListUserDataCmd;
|
||||
import org.apache.cloudstack.api.command.user.userdata.RegisterUserDataCmd;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.userdata.UserDataManager;
|
||||
import org.junit.After;
|
||||
@ -645,4 +647,41 @@ public class ManagementServerImplTest {
|
||||
Assert.assertNotNull(result.second());
|
||||
Assert.assertEquals(0, result.second().size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testZoneWideVolumeRequiresStorageMotionNonManaged() {
|
||||
PrimaryDataStore dataStore = Mockito.mock(PrimaryDataStore.class);
|
||||
Mockito.when(dataStore.isManaged()).thenReturn(false);
|
||||
Assert.assertFalse(spy.zoneWideVolumeRequiresStorageMotion(dataStore,
|
||||
Mockito.mock(Host.class), Mockito.mock(Host.class)));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testZoneWideVolumeRequiresStorageMotionSameClusterHost() {
|
||||
PrimaryDataStore dataStore = Mockito.mock(PrimaryDataStore.class);
|
||||
Mockito.when(dataStore.isManaged()).thenReturn(true);
|
||||
Host host1 = Mockito.mock(Host.class);
|
||||
Mockito.when(host1.getClusterId()).thenReturn(1L);
|
||||
Host host2 = Mockito.mock(Host.class);
|
||||
Mockito.when(host2.getClusterId()).thenReturn(1L);
|
||||
Assert.assertFalse(spy.zoneWideVolumeRequiresStorageMotion(dataStore, host1, host2));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testZoneWideVolumeRequiresStorageMotionDriverDependent() {
|
||||
PrimaryDataStore dataStore = Mockito.mock(PrimaryDataStore.class);
|
||||
Mockito.when(dataStore.isManaged()).thenReturn(true);
|
||||
PrimaryDataStoreDriver driver = Mockito.mock(PrimaryDataStoreDriver.class);
|
||||
Mockito.when(dataStore.getDriver()).thenReturn(driver);
|
||||
Host host1 = Mockito.mock(Host.class);
|
||||
Mockito.when(host1.getClusterId()).thenReturn(1L);
|
||||
Host host2 = Mockito.mock(Host.class);
|
||||
Mockito.when(host2.getClusterId()).thenReturn(2L);
|
||||
|
||||
Mockito.when(driver.zoneWideVolumesAvailableWithoutClusterMotion()).thenReturn(true);
|
||||
Assert.assertFalse(spy.zoneWideVolumeRequiresStorageMotion(dataStore, host1, host2));
|
||||
|
||||
Mockito.when(driver.zoneWideVolumesAvailableWithoutClusterMotion()).thenReturn(false);
|
||||
Assert.assertTrue(spy.zoneWideVolumeRequiresStorageMotion(dataStore, host1, host2));
|
||||
}
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user