Mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)
Added new logs to volume creation (#6689)
Co-authored-by: João Paraquetti <joao@scclouds.com.br>
This commit is contained in:
parent 3889e46eb6
commit efbf74ee06
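The hunks below consistently swap string-concatenated log messages for parameterized String.format messages (values wrapped in square brackets) and replace bare Exception throws with CloudRuntimeException. As a minimal, illustrative sketch of that logging style only — the class, method, and variable names here are hypothetical and not part of this commit:

    import org.apache.log4j.Logger;

    public class VolumeAllocationLogExample {
        private static final Logger s_logger = Logger.getLogger(VolumeAllocationLogExample.class);

        // Hypothetical helper showing the String.format-based log style used in this commit:
        // values are interpolated with %s and wrapped in square brackets in the message text.
        void logSelectedPool(String vmName, String poolName, String allocatorName) {
            s_logger.debug(String.format("VM [%s] will use storage pool [%s], found by Storage Pool Allocator [%s].",
                    vmName, poolName, allocatorName));
        }
    }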
@@ -359,10 +359,21 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                     s_logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent()));
                 }
 
-                return (storagePool.isPresent()) ? (StoragePool) this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary) :
-                        (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
+                StoragePool storage;
+                if (storagePool.isPresent()) {
+                    storage = (StoragePool)this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary);
+                    s_logger.debug(String.format("VM [%s] has a preferred storage pool [%s]. Volume Orchestrator found this storage using Storage Pool Allocator [%s] and will"
+                            + " use it.", vm, storage, allocator));
+                } else {
+                    storage = (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
+                    s_logger.debug(String.format("VM [%s] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool"
+                            + " [%s], which was discovered using Storage Pool Allocator [%s].", vm, storage, allocator));
+                }
+                return storage;
             }
+            s_logger.debug(String.format("Could not find any available Storage Pool using Storage Pool Allocator [%s].", allocator));
         }
+        s_logger.info("Volume Orchestrator could not find any available Storage Pool.");
         return null;
     }
 
@@ -28,11 +28,14 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
 import com.cloud.exception.StorageUnavailableException;
+import com.cloud.storage.ScopeType;
 import com.cloud.storage.StoragePoolStatus;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;
 
 import com.cloud.utils.Pair;
@@ -111,37 +114,41 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
         return reorderPools(pools, vmProfile, plan, dskCh);
     }
 
-    protected List<StoragePool> reorderPoolsByCapacity(DeploymentPlan plan,
-        List<StoragePool> pools) {
+    protected List<StoragePool> reorderPoolsByCapacity(DeploymentPlan plan, List<StoragePool> pools) {
         Long zoneId = plan.getDataCenterId();
         Long clusterId = plan.getClusterId();
         short capacityType;
-        if(pools != null && pools.size() != 0){
-            capacityType = pools.get(0).getPoolType().isShared() ? Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED : Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
-        } else{
+        if (CollectionUtils.isEmpty(pools)) {
             return null;
         }
 
-        List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("List of pools in descending order of free capacity: "+ poolIdsByCapacity);
+        if (pools.get(0).getPoolType().isShared()) {
+            capacityType = Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED;
+        } else {
+            capacityType = Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
         }
 
+        List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
+
+        s_logger.debug(String.format("List of pools in descending order of available capacity [%s].", poolIdsByCapacity));
+
         //now filter the given list of Pools by this ordered list
         Map<Long, StoragePool> poolMap = new HashMap<>();
         for (StoragePool pool : pools) {
             poolMap.put(pool.getId(), pool);
         }
         List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
 
         poolIdsByCapacity.retainAll(matchingPoolIds);
 
         List<StoragePool> reorderedPools = new ArrayList<>();
-        for(Long id: poolIdsByCapacity){
+        for (Long id: poolIdsByCapacity) {
             reorderedPools.add(poolMap.get(id));
         }
 
         return reorderedPools;
     }
 
     protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List<StoragePool> pools, Account account) {
@@ -153,9 +160,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
         Long clusterId = plan.getClusterId();
 
         List<Long> poolIdsByVolCount = volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount);
-        }
+        s_logger.debug(String.format("List of pools in ascending order of number of volumes for account [%s] is [%s].", account, poolIdsByVolCount));
 
         // now filter the given list of Pools by this ordered list
         Map<Long, StoragePool> poolMap = new HashMap<>();
@@ -180,6 +185,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
             s_logger.trace("reordering pools");
         }
         if (pools == null) {
+            s_logger.trace("There are no pools to reorder; returning null.");
             return null;
         }
         if (s_logger.isTraceEnabled()) {
@@ -363,4 +369,18 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
         }
         return true;
     }
+
+    protected void logDisabledStoragePools(long dcId, Long podId, Long clusterId, ScopeType scope) {
+        List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, scope);
+        if (disabledPools != null && !disabledPools.isEmpty()) {
+            s_logger.trace(String.format("Ignoring pools [%s] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools)));
+        }
+    }
+
+    protected void logStartOfSearch(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, int returnUpTo,
+            boolean bypassStorageTypeCheck){
+        s_logger.trace(String.format("%s is looking for storage pools that match the VM's disk profile [%s], virtual machine profile [%s] and "
+                + "deployment plan [%s]. Returning up to [%d] and bypassStorageTypeCheck [%s].", this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck));
+    }
+
 }
@@ -46,7 +46,7 @@ public class ClusterScopeStoragePoolAllocat
 
     @Override
     protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
-        s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool");
+        logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
 
         if (!bypassStorageTypeCheck && dskCh.useLocalStorage()) {
             return null;
@@ -63,40 +63,34 @@ public class ClusterScopeStoragePoolAllocat
             // clusterId == null here because it will break ClusterWide primary
             // storage volume operation where
             // only podId is passed into this call.
+            s_logger.debug("ClusterScopeStoragePoolAllocator is returning null since the pod ID is null. This may be a zone wide storage.");
             return null;
         }
         if (dskCh.getTags() != null && dskCh.getTags().length != 0) {
-            s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags()) +
-                    ". Disabled pools will be ignored.");
+            s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s], cluster [%s], and having tags [%s]. Disabled pools will be ignored.", dcId, podId, clusterId,
+                    Arrays.toString(dskCh.getTags())));
         } else {
-            s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + ". Disabled pools will be ignored.");
+            s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s] and cluster [%s]. Disabled pools will be ignored.", dcId, podId, clusterId));
         }
 
         if (s_logger.isTraceEnabled()) {
             // Log the pools details that are ignored because they are in disabled state
-            List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, ScopeType.CLUSTER);
-            if (disabledPools != null && !disabledPools.isEmpty()) {
-                for (StoragePoolVO pool : disabledPools) {
-                    s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
-                }
-            }
+            logDisabledStoragePools(dcId, podId, clusterId, ScopeType.CLUSTER);
         }
 
         List<StoragePoolVO> pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
-        s_logger.debug("Found pools matching tags: " + pools);
+        s_logger.debug(String.format("Found pools [%s] that match with tags [%s].", pools, Arrays.toString(dskCh.getTags())));
 
         // add remaining pools in cluster, that did not match tags, to avoid set
         List<StoragePoolVO> allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null);
         allPools.removeAll(pools);
         for (StoragePoolVO pool : allPools) {
-            s_logger.debug("Adding pool " + pool + " to avoid set since it did not match tags");
+            s_logger.trace(String.format("Adding pool [%s] to the 'avoid' set since it did not match any tags.", pool));
             avoid.addPool(pool.getId());
         }
 
         if (pools.size() == 0) {
-            if (s_logger.isDebugEnabled()) {
-                s_logger.debug("No storage pools available for " + ServiceOffering.StorageType.shared.toString() + " volume allocation, returning");
-            }
+            s_logger.debug(String.format("No storage pools available for [%s] volume allocation.", ServiceOffering.StorageType.shared));
             return suitablePools;
         }
 
@@ -106,15 +100,14 @@ public class ClusterScopeStoragePoolAllocat
             }
             StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId());
             if (filter(avoid, storagePool, dskCh, plan)) {
+                s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
                 suitablePools.add(storagePool);
             } else {
                 avoid.addPool(pool.getId());
             }
         }
 
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("ClusterScopeStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
-        }
+        s_logger.debug(String.format("ClusterScopeStoragePoolAllocator is returning [%s] suitable storage pools [%s].", suitablePools.size(), suitablePools));
 
         return suitablePools;
     }
@@ -48,7 +48,7 @@ public class GarbageCollectingStoragePoolAl
 
     @Override
     public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
-        s_logger.debug("GarbageCollectingStoragePoolAllocator looking for storage pool");
+        logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
         if (!_storagePoolCleanupEnabled) {
             s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
             return null;
@@ -61,20 +61,16 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
 
     @Override
     protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
-        s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
+        logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
 
         if (!bypassStorageTypeCheck && !dskCh.useLocalStorage()) {
+            s_logger.debug("LocalStoragePoolAllocator is returning null since the disk profile does not use local storage and bypassStorageTypeCheck is false.");
             return null;
         }
 
         if (s_logger.isTraceEnabled()) {
             // Log the pools details that are ignored because they are in disabled state
-            List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST);
-            if (disabledPools != null && !disabledPools.isEmpty()) {
-                for (StoragePoolVO pool : disabledPools) {
-                    s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
-                }
-            }
+            logDisabledStoragePools(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST);
         }
 
         List<StoragePool> suitablePools = new ArrayList<StoragePool>();
@@ -86,7 +82,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
             if (pool != null && pool.isLocal()) {
                 StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
                 if (filter(avoid, storagePool, dskCh, plan)) {
-                    s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
+                    s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
                     suitablePools.add(storagePool);
                 } else {
                     avoid.addPool(pool.getId());
@@ -100,6 +96,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
         } else {
             if (plan.getPodId() == null) {
                 // zone wide primary storage deployment
+                s_logger.debug("LocalStoragePoolAllocator is returning null since both the host ID and pod ID are null. That means this should be a zone wide primary storage deployment.");
                 return null;
             }
             List<StoragePoolVO> availablePools =
@@ -116,18 +113,14 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
                 }
             }
 
-            // add remaining pools in cluster, that did not match tags, to avoid
-            // set
+            // add remaining pools in cluster to the 'avoid' set which did not match tags
            List<StoragePoolVO> allPools = storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), null);
             allPools.removeAll(availablePools);
             for (StoragePoolVO pool : allPools) {
                 avoid.addPool(pool.getId());
             }
         }
-
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
-        }
+        s_logger.debug(String.format("LocalStoragePoolAllocator returning [%s] suitable storage pools [%s].", suitablePools.size(), suitablePools));
 
         return suitablePools;
     }
@@ -17,6 +17,7 @@
 package org.apache.cloudstack.storage.allocator;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -50,7 +51,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
 
     @Override
     protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
-        LOGGER.debug("ZoneWideStoragePoolAllocator to find storage pool");
+        logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
 
         if (!bypassStorageTypeCheck && dskCh.useLocalStorage()) {
             return null;
@@ -58,18 +59,14 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
 
         if (LOGGER.isTraceEnabled()) {
             // Log the pools details that are ignored because they are in disabled state
-            List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, ScopeType.ZONE);
-            if (disabledPools != null && !disabledPools.isEmpty()) {
-                for (StoragePoolVO pool : disabledPools) {
-                    LOGGER.trace("Ignoring pool " + pool + " as it is in disabled state.");
-                }
-            }
+            logDisabledStoragePools(plan.getDataCenterId(), null, null, ScopeType.ZONE);
         }
 
         List<StoragePool> suitablePools = new ArrayList<>();
 
         List<StoragePoolVO> storagePools = storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
         if (storagePools == null) {
+            LOGGER.debug(String.format("Could not find any zone wide storage pool that matched with any of the following tags [%s].", Arrays.toString(dskCh.getTags())));
             storagePools = new ArrayList<>();
         }
 
@@ -97,6 +94,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
             }
             StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
             if (filter(avoid, storagePool, dskCh, plan)) {
+                LOGGER.trace(String.format("Found suitable local storage pool [%s], adding to list.", storage));
                 suitablePools.add(storagePool);
             } else {
                 if (canAddStoragePoolToAvoidSet(storage)) {
@@ -104,6 +102,8 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
                 }
             }
         }
+        LOGGER.debug(String.format("ZoneWideStoragePoolAllocator is returning [%s] suitable storage pools [%s].", suitablePools.size(), suitablePools));
+
         return suitablePools;
     }
 
@@ -2421,7 +2421,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
 
             ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStore.getUuid());
             if (morDatastore == null) {
-                throw new Exception("Unable to find datastore in vSphere");
+                throw new CloudRuntimeException(String.format("Unable to find datastore [%s] in vSphere.", primaryStore.getUuid()));
             }
 
             DatastoreMO dsMo = new DatastoreMO(context, morDatastore);
@@ -2439,13 +2439,13 @@ public class VmwareStorageProcessor implements StorageProcessor {
                 newVol.setPath(file.getFileBaseName());
                 newVol.setSize(volume.getSize());
             } catch (Exception e) {
-                s_logger.debug("Create disk using vStorageObject manager failed due to exception " + e.getMessage() + ", retying using worker VM");
+                s_logger.error(String.format("Create disk using vStorageObject manager failed due to [%s], retrying using worker VM.", e.getMessage()), e);
                 String dummyVmName = hostService.getWorkerName(context, cmd, 0, dsMo);
                 try {
-                    s_logger.info("Create worker VM " + dummyVmName);
+                    s_logger.info(String.format("Creating worker VM [%s].", dummyVmName));
                     vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName, null);
                     if (vmMo == null) {
-                        throw new Exception("Unable to create a dummy VM for volume creation");
+                        throw new CloudRuntimeException("Unable to create a dummy VM for volume creation.");
                     }
 
                     synchronized (this) {
@@ -2454,9 +2454,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
                             vmMo.detachDisk(volumeDatastorePath, false);
                         }
                         catch (Exception e1) {
-                            s_logger.error("Deleting file " + volumeDatastorePath + " due to error: " + e1.getMessage());
+                            s_logger.error(String.format("Deleting file [%s] due to [%s].", volumeDatastorePath, e1.getMessage()), e1);
                             VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid, dcMo, VmwareManager.s_vmwareSearchExcludeFolder.value());
-                            throw new CloudRuntimeException("Unable to create volume due to: " + e1.getMessage());
+                            throw new CloudRuntimeException(String.format("Unable to create volume due to [%s].", e1.getMessage()));
                         }
                     }
 
@@ -2465,7 +2465,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
                 newVol.setSize(volume.getSize());
                 return new CreateObjectAnswer(newVol);
             } finally {
-                s_logger.info("Destroy dummy VM after volume creation");
+                s_logger.info("Destroying dummy VM after volume creation.");
                 if (vmMo != null) {
                     vmMo.detachAllDisksAndDestroy();
                 }
@@ -36,6 +36,7 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
 
     @Override
     public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
+        logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
 
         List<StoragePool> suitablePools = new ArrayList<StoragePool>();
 
@@ -44,22 +45,22 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
         Long clusterId = plan.getClusterId();
 
         if (podId == null) {
+            s_logger.debug("RandomStoragePoolAllocator is returning null since the pod ID is null. This may be a zone wide storage.");
             return null;
         }
 
-        s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
+        s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s] and cluster [%s].", dcId, podId, clusterId));
         List<StoragePoolVO> pools = storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
         if (pools.size() == 0) {
-            if (s_logger.isDebugEnabled()) {
-                s_logger.debug("No storage pools available for allocation, returning");
-            }
+            s_logger.debug(String.format("RandomStoragePoolAllocator found no storage pools available for allocation in dc [%s], pod [%s] and cluster [%s]. Returning an empty list.",
+                    dcId, podId, clusterId));
             return suitablePools;
         }
 
         Collections.shuffle(pools);
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("RandomStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
-        }
+        s_logger.debug(String.format("RandomStoragePoolAllocator has [%s] pools to check for allocation [%s].", pools.size(), pools));
         for (StoragePoolVO pool : pools) {
             if (suitablePools.size() == returnUpTo) {
                 break;
@@ -67,13 +68,12 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
             StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
 
             if (filter(avoid, pol, dskCh, plan)) {
+                s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
                 suitablePools.add(pol);
             }
         }
 
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("RandomStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
-        }
+        s_logger.debug(String.format("RandomStoragePoolAllocator is returning [%s] suitable storage pools [%s].", suitablePools.size(), suitablePools));
 
         return suitablePools;
     }
@@ -478,9 +478,7 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost {
 
     @Override
     public ManagedObjectReference findDatastore(String poolUuid) throws Exception {
-        if (s_logger.isTraceEnabled())
-            s_logger.trace("vCenter API trace - findDatastore(). target MOR: " + _mor.getValue() + ", poolUuid: " + poolUuid);
-
+        s_logger.trace(String.format("Searching datastore in target MOR [%s] with poolUuid [%s].", _mor.getValue(), poolUuid));
         CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, _context.getServiceContent().getCustomFieldsManager());
         int key = cfmMo.getCustomFieldKey("Datastore", CustomFieldConstants.CLOUD_UUID);
@@ -489,18 +487,17 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost {
         ObjectContent[] ocs = getDatastorePropertiesOnHyperHost(new String[] {"name", String.format("value[%d]", key)});
         if (ocs != null) {
             for (ObjectContent oc : ocs) {
-                if (oc.getPropSet().get(0).getVal().equals(poolUuid))
+                if (oc.getPropSet().get(0).getVal().equals(poolUuid)) {
+                    s_logger.trace(String.format("Found datastore [%s] in target MOR [%s].", oc.getObj(), _mor.getValue()));
                     return oc.getObj();
+                }
                 if (oc.getPropSet().size() > 1) {
                     DynamicProperty prop = oc.getPropSet().get(1);
                     if (prop != null && prop.getVal() != null) {
                         if (prop.getVal() instanceof CustomFieldStringValue) {
                             String val = ((CustomFieldStringValue)prop.getVal()).getValue();
                             if (val.equalsIgnoreCase(poolUuid)) {
-                                if (s_logger.isTraceEnabled())
-                                    s_logger.trace("vCenter API trace - findDatastore() done(successfully)");
+                                s_logger.trace(String.format("Found datastore [%s] in target MOR [%s].", oc.getObj(), _mor.getValue()));
                                 return oc.getObj();
                             }
                         }
@@ -509,8 +506,7 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost {
             }
         }
 
-        if (s_logger.isTraceEnabled())
-            s_logger.trace("vCenter API trace - findDatastore() done(failed)");
+        s_logger.trace(String.format("Failed to find a datastore with UUID [%s].", poolUuid));
         return null;
     }
 
@@ -2303,7 +2303,7 @@ public class HypervisorHostHelper {
         String hiddenFolderPath = String.format("%s/%s", folderPath, VSPHERE_DATASTORE_HIDDEN_FOLDER);
 
         if (!dsMo.folderExists(dsPath, VSPHERE_DATASTORE_BASE_FOLDER)) {
-            s_logger.info(String.format("vSphere datastore base folder: %s does not exist, now creating on datastore: %s", VSPHERE_DATASTORE_BASE_FOLDER, dsMo.getName()));
+            s_logger.info(String.format("vSphere datastore base folder [%s] does not exist on datastore [%s]. We will create it.", VSPHERE_DATASTORE_BASE_FOLDER, dsMo.getName()));
             dsMo.makeDirectory(folderPath, mor);
             // Adding another directory so vCentre doesn't remove the fcd directory when it's empty
             dsMo.makeDirectory(hiddenFolderPath, mor);
@@ -43,6 +43,7 @@ import com.vmware.vim25.RuntimeFaultFaultMsg;
 import com.vmware.vim25.TaskInfo;
 import com.vmware.vim25.TaskInfoState;
 import com.vmware.vim25.VirtualMachineTicket;
+import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
@@ -675,7 +676,7 @@ public class VirtualMachineMO extends BaseMO {
         byte[] content = getContext().getResourceContent(url);
 
         if (content == null || content.length < 1) {
-            s_logger.warn("Snapshot descriptor file (vsd) does not exist anymore?");
+            s_logger.warn("Snapshot descriptor file (vsd) was not found.");
         }
 
         SnapshotDescriptor descriptor = new SnapshotDescriptor();
@@ -1250,11 +1251,8 @@ public class VirtualMachineMO extends BaseMO {
     // vmdkDatastorePath: [datastore name] vmdkFilePath
     public void createDisk(String vmdkDatastorePath, VirtualDiskType diskType, VirtualDiskMode diskMode, String rdmDeviceName, long sizeInMb,
             ManagedObjectReference morDs, int controllerKey, String vSphereStoragePolicyId) throws Exception {
-        if (s_logger.isTraceEnabled())
-            s_logger.trace("vCenter API trace - createDisk(). target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + vmdkDatastorePath + ", sizeInMb: " + sizeInMb +
-                    ", diskType: " + diskType + ", diskMode: " + diskMode + ", rdmDeviceName: " + rdmDeviceName + ", datastore: " + morDs.getValue() + ", controllerKey: " +
-                    controllerKey);
+        s_logger.trace(String.format("Creating disk in target MOR [%s] with values: vmdkDatastorePath [%s], sizeInMb [%s], diskType [%s], diskMode [%s], rdmDeviceName [%s]"
+                + ", datastore [%s], controllerKey [%s].", _mor.getValue(), vmdkDatastorePath, sizeInMb, diskType, diskMode, rdmDeviceName, morDs.getValue(), controllerKey));
 
         assert (vmdkDatastorePath != null);
         assert (morDs != null);
@@ -1283,6 +1281,8 @@ public class VirtualMachineMO extends BaseMO {
 
             backingInfo.setDatastore(morDs);
             backingInfo.setFileName(vmdkDatastorePath);
+            s_logger.trace(String.format("Created backing info with values [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields("diskMode", "thinProvisioned",
+                    "eagerlyScrub", "datastore", "filename")));
             newDisk.setBacking(backingInfo);
         } else if (diskType == VirtualDiskType.RDM || diskType == VirtualDiskType.RDMP) {
             VirtualDiskRawDiskMappingVer1BackingInfo backingInfo = new VirtualDiskRawDiskMappingVer1BackingInfo();
@@ -1298,6 +1298,8 @@ public class VirtualMachineMO extends BaseMO {
 
             backingInfo.setDatastore(morDs);
             backingInfo.setFileName(vmdkDatastorePath);
+            s_logger.trace(String.format("Created backing info with values [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields("compatibilityMode", "deviceName",
+                    "diskMode", "datastore", "filename")));
             newDisk.setBacking(backingInfo);
         }
 
@@ -1318,9 +1320,7 @@ public class VirtualMachineMO extends BaseMO {
             PbmProfileManagerMO profMgrMo = new PbmProfileManagerMO(getContext());
             VirtualMachineDefinedProfileSpec diskProfileSpec = profMgrMo.getProfileSpec(vSphereStoragePolicyId);
             deviceConfigSpec.getProfile().add(diskProfileSpec);
-            if (s_logger.isDebugEnabled()) {
-                s_logger.debug(String.format("Adding vSphere storage profile: %s to volume [%s]", vSphereStoragePolicyId, vmdkDatastorePath));
-            }
+            s_logger.debug(String.format("Adding vSphere storage profile [%s] to volume [%s].", vSphereStoragePolicyId, vmdkDatastorePath));
         }
         reConfigSpec.getDeviceChange().add(deviceConfigSpec);
 
@@ -1328,15 +1328,10 @@ public class VirtualMachineMO extends BaseMO {
         boolean result = _context.getVimClient().waitForTask(morTask);
 
         if (!result) {
-            if (s_logger.isTraceEnabled())
-                s_logger.trace("vCenter API trace - createDisk() done(failed)");
-
-            throw new Exception("Unable to create disk " + vmdkDatastorePath + " due to " + TaskMO.getTaskFailureInfo(_context, morTask));
+            throw new CloudRuntimeException(String.format("Unable to create disk [%s] due to [%s].", vmdkDatastorePath, TaskMO.getTaskFailureInfo(_context, morTask)));
         }
 
         _context.waitForTaskProgressDone(morTask);
-
-        if (s_logger.isTraceEnabled())
-            s_logger.trace("vCenter API trace - createDisk() done(successfully)");
     }
 
     public void updateVmdkAdapter(String vmdkFileName, String diskController) throws Exception {
@@ -1484,22 +1479,19 @@ public class VirtualMachineMO extends BaseMO {
     // vmdkDatastorePath: [datastore name] vmdkFilePath
     public List<Pair<String, ManagedObjectReference>> detachDisk(String vmdkDatastorePath, boolean deleteBackingFile) throws Exception {
 
-        if (s_logger.isTraceEnabled())
-            s_logger.trace("vCenter API trace - detachDisk(). target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + vmdkDatastorePath + ", deleteBacking: " +
-                    deleteBackingFile);
+        s_logger.trace(String.format("Detaching disk in target MOR [%s], with vmdkDatastorePath [%s] and deleteBacking [%s].", _mor.getValue(), vmdkDatastorePath, deleteBackingFile));
 
         // Note: if VM has been taken snapshot, original backing file will be renamed, therefore, when we try to find the matching
         // VirtualDisk, we only perform prefix matching
         Pair<VirtualDisk, String> deviceInfo = getDiskDevice(vmdkDatastorePath);
         if (deviceInfo == null) {
-            s_logger.warn("vCenter API trace - detachDisk() done (failed)");
-            throw new Exception("No such disk device: " + vmdkDatastorePath);
+            throw new CloudRuntimeException(String.format("No such disk device [%s].", vmdkDatastorePath));
         }
 
         // IDE virtual disk cannot be detached if VM is running
         if (deviceInfo.second() != null && deviceInfo.second().contains("ide")) {
             if (getPowerState() == VirtualMachinePowerState.POWERED_ON) {
-                throw new Exception("Removing a virtual disk over IDE controller is not supported while VM is running in VMware hypervisor. " +
+                throw new CloudRuntimeException("Removing a virtual disk over IDE controller is not supported while the VM is running in VMware hypervisor. " +
                         "Please re-try when VM is not running.");
             }
         }
@@ -1521,10 +1513,7 @@ public class VirtualMachineMO extends BaseMO {
         boolean result = _context.getVimClient().waitForTask(morTask);
 
         if (!result) {
-            if (s_logger.isTraceEnabled())
-                s_logger.trace("vCenter API trace - detachDisk() done (failed)");
-
-            throw new Exception("Failed to detach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask));
+            throw new CloudRuntimeException(String.format("Failed to detach disk from VM [%s] due to [%s].", getVmName(), TaskMO.getTaskFailureInfo(_context, morTask)));
         }
         _context.waitForTaskProgressDone(morTask);
 
@@ -1533,7 +1522,7 @@ public class VirtualMachineMO extends BaseMO {
         try {
             snapshotDescriptor = getSnapshotDescriptor();
         } catch (Exception e) {
-            s_logger.info("Unable to retrieve snapshot descriptor, will skip updating snapshot reference");
+            s_logger.warn(String.format("Unable to retrieve snapshot descriptor due to [%s], we will not update the snapshot reference.", e.getMessage()), e);
         }
 
         if (snapshotDescriptor != null) {
@@ -1549,8 +1538,6 @@ public class VirtualMachineMO extends BaseMO {
             getContext().uploadResourceContent(url, snapshotDescriptor.getVmsdContent());
         }
 
-        if (s_logger.isTraceEnabled())
-            s_logger.trace("vCenter API trace - detachDisk() done (successfully)");
         return chain;
     }
 
@@ -2602,12 +2589,13 @@ public class VirtualMachineMO extends BaseMO {
         String trimmedSrcBaseName = VmwareHelper.trimSnapshotDeltaPostfix(srcBaseName);
         String srcDatastoreName = dsSrcFile.getDatastoreName() != null ? dsSrcFile.getDatastoreName() : zeroLengthString;
 
-        s_logger.info("Look for disk device info for volume : " + vmdkDatastorePath + " with base name: " + srcBaseName);
+        s_logger.info(String.format("Looking for disk device info for volume [%s] with base name [%s].", vmdkDatastorePath, srcBaseName));
 
         if (devices != null && devices.size() > 0) {
             for (VirtualDevice device : devices) {
                 if (device instanceof VirtualDisk) {
-                    s_logger.info("Test against disk device, controller key: " + device.getControllerKey() + ", unit number: " + device.getUnitNumber());
+                    s_logger.info(String.format("Testing if disk device with controller key [%s] and unit number [%s] has backing of type VirtualDiskFlatVer2BackingInfo.",
+                            device.getControllerKey(), device.getUnitNumber()));
 
                     VirtualDeviceBackingInfo backingInfo = device.getBacking();
 
@@ -2615,12 +2603,12 @@ public class VirtualMachineMO extends BaseMO {
                         VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo;
 
                         do {
-                            s_logger.info("Test against disk backing : " + diskBackingInfo.getFileName());
 
                             DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName());
 
                             String backingDatastoreName = dsBackingFile.getDatastoreName() != null ? dsBackingFile.getDatastoreName() : zeroLengthString;
 
+                            s_logger.info(String.format("Testing if backing datastore name [%s] from backing [%s] matches source datastore name [%s].", backingDatastoreName, diskBackingInfo.getFileName(), srcDatastoreName));
+
                             if (srcDatastoreName.equals(zeroLengthString)) {
                                 backingDatastoreName = zeroLengthString;
                             }
@@ -2631,7 +2619,7 @@ public class VirtualMachineMO extends BaseMO {
                                 if (backingBaseName.equalsIgnoreCase(srcBaseName)) {
                                     String deviceNumbering = getDeviceBusName(devices, device);
 
-                                    s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering);
+                                    s_logger.info(String.format("Disk backing [%s] matches device bus name [%s].", diskBackingInfo.getFileName(), deviceNumbering));
                                     return new Pair<>((VirtualDisk)device, deviceNumbering);
                                 }
 
@@ -2650,24 +2638,25 @@ public class VirtualMachineMO extends BaseMO {
         }
 
         // No disk device was found with an exact match for the volume path, hence look for disk device that matches the trimmed name.
-        s_logger.info("No disk device with an exact match found for volume : " + vmdkDatastorePath + ". Look for disk device info against trimmed base name: " + srcBaseName);
+        s_logger.info(String.format("No disk device exactly matching [%s] was found for volume [%s]. Looking for disk device info against trimmed base name [%s].", srcBaseName,
+                vmdkDatastorePath, srcBaseName));
 
         if (partialMatchingDiskDevices != null) {
             if (partialMatchingDiskDevices.size() == 1) {
                 VirtualDiskFlatVer2BackingInfo matchingDiskBackingInfo = (VirtualDiskFlatVer2BackingInfo)partialMatchingDiskDevices.get(0).first().getBacking();
 
-                s_logger.info("Disk backing : " + matchingDiskBackingInfo.getFileName() + " matches ==> " + partialMatchingDiskDevices.get(0).second());
+                s_logger.info(String.format("Disk backing [%s] matches [%s].", matchingDiskBackingInfo.getFileName(), partialMatchingDiskDevices.get(0).second()));
 
                 return partialMatchingDiskDevices.get(0);
             } else if (partialMatchingDiskDevices.size() > 1) {
-                s_logger.warn("Disk device info lookup for volume: " + vmdkDatastorePath + " failed as multiple disk devices were found to match" +
-                        " volume's trimmed base name: " + trimmedSrcBaseName);
+                s_logger.warn(String.format("Disk device info lookup for volume [%s] failed as multiple disk devices were found to match volume's trimmed base name [%s].",
+                        vmdkDatastorePath, trimmedSrcBaseName));
 
                 return null;
             }
         }
 
-        s_logger.warn("Disk device info lookup for volume: " + vmdkDatastorePath + " failed as no matching disk device found");
+        s_logger.warn(String.format("Disk device info lookup for volume [%s] failed as no matching disk device was found.", vmdkDatastorePath));
 
         return null;
     }