Added new logs to volume creation (#6689)

Co-authored-by: João Paraquetti <joao@scclouds.com.br>
João Jandre, 2022-09-26 19:11:14 -03:00, committed by GitHub
parent 3889e46eb6
commit efbf74ee06
11 changed files with 132 additions and 130 deletions


@ -359,10 +359,21 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
s_logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent()));
}
StoragePool storage;
if (storagePool.isPresent()) {
storage = (StoragePool)this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary);
s_logger.debug(String.format("VM [%s] has a preferred storage pool [%s]. Volume Orchestrator found this storage using Storage Pool Allocator [%s] and will"
+ " use it.", vm, storage, allocator));
} else {
storage = (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
s_logger.debug(String.format("VM [%s] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool"
+ " [%s], which was discovered using Storage Pool Allocator [%s].", vm, storage, allocator));
}
return storage;
}
s_logger.debug(String.format("Could not find any available Storage Pool using Storage Pool Allocator [%s].", allocator));
}
s_logger.info("Volume Orchestrator could not find any available Storage Pool.");
return null;
}
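The hunk above trades a compact ternary for an explicit if/else purely so each branch can log which pool was chosen and which allocator found it. A minimal standalone sketch of the pattern, with simplified types (String in place of StoragePool) and hypothetical names:

    import java.util.List;
    import java.util.Optional;

    // Hypothetical sketch, not CloudStack code: splitting the ternary lets each
    // branch record which pool was selected and why before returning.
    class PoolChoiceSketch {
        static String choosePool(Optional<String> preferred, List<String> candidates) {
            String chosen;
            if (preferred.isPresent()) {
                chosen = preferred.get();
                System.out.printf("Using preferred pool [%s].%n", chosen);
            } else {
                chosen = candidates.get(0);
                System.out.printf("No usable preferred pool; falling back to [%s].%n", chosen);
            }
            return chosen;
        }
    }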


@ -28,11 +28,14 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.storage.ScopeType;
import com.cloud.storage.StoragePoolStatus;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.lang3.StringUtils;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import com.cloud.utils.Pair;
@ -111,37 +114,41 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
return reorderPools(pools, vmProfile, plan, dskCh);
}
protected List<StoragePool> reorderPoolsByCapacity(DeploymentPlan plan, List<StoragePool> pools) {
Long zoneId = plan.getDataCenterId();
Long clusterId = plan.getClusterId();
if (CollectionUtils.isEmpty(pools)) {
return null;
}
short capacityType;
if (pools.get(0).getPoolType().isShared()) {
capacityType = Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED;
} else {
capacityType = Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
}
List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
s_logger.debug(String.format("List of pools in descending order of available capacity [%s].", poolIdsByCapacity));
//now filter the given list of Pools by this ordered list
Map<Long, StoragePool> poolMap = new HashMap<>();
for (StoragePool pool : pools) {
poolMap.put(pool.getId(), pool);
}
List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
poolIdsByCapacity.retainAll(matchingPoolIds);
List<StoragePool> reorderedPools = new ArrayList<>();
for (Long id : poolIdsByCapacity) {
reorderedPools.add(poolMap.get(id));
}
return reorderedPools;
}
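The reordering above follows one technique: fetch a globally ordered id list (descending free capacity), intersect it with the candidate pools' ids via retainAll, then rebuild the candidates in that global order. A self-contained sketch with simplified generics; the capacityDao call and StoragePool type are elided:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Generic sketch of reorderPoolsByCapacity's core: keep only candidate ids,
    // preserving the global capacity order, then map ids back to pools.
    class ReorderSketch {
        static <T> List<T> reorder(List<Long> orderedIds, Map<Long, T> candidatesById) {
            List<Long> kept = new ArrayList<>(orderedIds);
            kept.retainAll(candidatesById.keySet());   // intersection keeps orderedIds' order
            List<T> result = new ArrayList<>();
            for (Long id : kept) {
                result.add(candidatesById.get(id));
            }
            return result;
        }
    }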
protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List<StoragePool> pools, Account account) {
@ -153,9 +160,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
Long clusterId = plan.getClusterId();
List<Long> poolIdsByVolCount = volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
s_logger.debug(String.format("List of pools in ascending order of number of volumes for account [%s] is [%s].", account, poolIdsByVolCount));
// now filter the given list of Pools by this ordered list
Map<Long, StoragePool> poolMap = new HashMap<>();
@ -180,6 +185,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
s_logger.trace("reordering pools");
}
if (pools == null) {
s_logger.trace("There are no pools to reorder; returning null.");
return null;
}
if (s_logger.isTraceEnabled()) {
@ -363,4 +369,18 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
}
return true;
}
protected void logDisabledStoragePools(long dcId, Long podId, Long clusterId, ScopeType scope) {
List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, scope);
if (disabledPools != null && !disabledPools.isEmpty()) {
s_logger.trace(String.format("Ignoring pools [%s] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools)));
}
}
protected void logStartOfSearch(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, int returnUpTo,
boolean bypassStorageTypeCheck) {
s_logger.trace(String.format("%s is looking for storage pools that match the VM's disk profile [%s], virtual machine profile [%s] and "
+ "deployment plan [%s]. Returning up to [%d] pools, with bypassStorageTypeCheck [%s].", this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck));
}
}
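Callers keep logDisabledStoragePools behind an isTraceEnabled() guard even though the helper logs at trace level itself, because gathering the message data costs a storagePoolDao query. A simplified standalone sketch of that guard pattern; the Supplier stands in for the DAO call:

    import java.util.List;
    import java.util.function.Supplier;

    // Sketch: the guard skips the expensive query entirely when trace is off.
    class GuardedTraceSketch {
        static boolean traceEnabled = false;       // stands in for s_logger.isTraceEnabled()

        static void traceDisabledPools(Supplier<List<String>> expensiveQuery) {
            if (traceEnabled) {
                List<String> disabled = expensiveQuery.get();
                if (!disabled.isEmpty()) {
                    System.out.printf("Ignoring pools %s as they are in disabled state.%n", disabled);
                }
            }
        }
    }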


@ -46,7 +46,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool");
logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
if (!bypassStorageTypeCheck && dskCh.useLocalStorage()) {
return null;
@ -63,40 +63,34 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
// clusterId == null here because it will break ClusterWide primary
// storage volume operation where
// only podId is passed into this call.
s_logger.debug("ClusterScopeStoragePoolAllocator is returning null since the pod ID is null. This may be a zone wide storage.");
return null;
}
if (dskCh.getTags() != null && dskCh.getTags().length != 0) {
s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags()) +
". Disabled pools will be ignored.");
s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s], cluster [%s], and having tags [%s]. Disabled pools will be ignored.", dcId, podId, clusterId,
Arrays.toString(dskCh.getTags())));
} else {
s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + ". Disabled pools will be ignored.");
s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s] and cluster [%s]. Disabled pools will be ignored.", dcId, podId, clusterId));
}
if (s_logger.isTraceEnabled()) {
// Log the pools details that are ignored because they are in disabled state
logDisabledStoragePools(dcId, podId, clusterId, ScopeType.CLUSTER);
}
List<StoragePoolVO> pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
s_logger.debug("Found pools matching tags: " + pools);
s_logger.debug(String.format("Found pools [%s] that match with tags [%s].", pools, Arrays.toString(dskCh.getTags())));
// add remaining pools in cluster, that did not match tags, to avoid set
List<StoragePoolVO> allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null);
allPools.removeAll(pools);
for (StoragePoolVO pool : allPools) {
s_logger.debug("Adding pool " + pool + " to avoid set since it did not match tags");
s_logger.trace(String.format("Adding pool [%s] to the 'avoid' set since it did not match any tags.", pool));
avoid.addPool(pool.getId());
}
if (pools.size() == 0) {
s_logger.debug(String.format("No storage pools available for [%s] volume allocation.", ServiceOffering.StorageType.shared));
return suitablePools;
}
@ -106,15 +100,14 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
}
StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
suitablePools.add(storagePool);
} else {
avoid.addPool(pool.getId());
}
}
s_logger.debug(String.format("ClusterScopeStoragePoolAllocator is returning [%s] suitable storage pools [%s].", suitablePools.size(), suitablePools));
return suitablePools;
}
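The avoid-set handling above is a set difference: pools matching the offering's tags stay candidates, while every other pool in scope is pushed into the shared avoid set so later allocators skip it too. A standalone sketch over pool ids only (DAO lookups elided):

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Sketch of the tag-filter/avoid-set split used by the allocator above.
    class TagFilterSketch {
        static List<Long> filterByTags(List<Long> matchingIds, List<Long> allIdsInScope, Set<Long> avoid) {
            List<Long> rest = new ArrayList<>(allIdsInScope);
            rest.removeAll(matchingIds);
            avoid.addAll(rest);                    // non-matching pools are excluded downstream
            return matchingIds;
        }
    }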


@ -48,7 +48,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
@Override
public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
s_logger.debug("GarbageCollectingStoragePoolAllocator looking for storage pool");
logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
if (!_storagePoolCleanupEnabled) {
s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
return null;


@ -61,20 +61,16 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
if (!bypassStorageTypeCheck && !dskCh.useLocalStorage()) {
s_logger.debug("LocalStoragePoolAllocator is returning null since the disk profile does not use local storage and bypassStorageTypeCheck is false.");
return null;
}
if (s_logger.isTraceEnabled()) {
// Log the pools details that are ignored because they are in disabled state
logDisabledStoragePools(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST);
}
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
@ -86,7 +82,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
if (pool != null && pool.isLocal()) {
StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
suitablePools.add(storagePool);
} else {
avoid.addPool(pool.getId());
@ -100,6 +96,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
} else {
if (plan.getPodId() == null) {
// zone wide primary storage deployment
s_logger.debug("LocalStoragePoolAllocator is returning null since both the host ID and pod ID are null. That means this should be a zone wide primary storage deployment.");
return null;
}
List<StoragePoolVO> availablePools =
@ -116,18 +113,14 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
}
// add remaining pools in the cluster that did not match tags to the 'avoid' set
List<StoragePoolVO> allPools = storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), null);
allPools.removeAll(availablePools);
for (StoragePoolVO pool : allPools) {
avoid.addPool(pool.getId());
}
}
s_logger.debug(String.format("LocalStoragePoolAllocator returning [%s] suitable storage pools [%s].", suitablePools.size(), suitablePools));
return suitablePools;
}


@ -17,6 +17,7 @@
package org.apache.cloudstack.storage.allocator;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -50,7 +51,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
LOGGER.debug("ZoneWideStoragePoolAllocator to find storage pool");
logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
if (!bypassStorageTypeCheck && dskCh.useLocalStorage()) {
return null;
@ -58,18 +59,14 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
if (LOGGER.isTraceEnabled()) {
// Log the pools details that are ignored because they are in disabled state
logDisabledStoragePools(plan.getDataCenterId(), null, null, ScopeType.ZONE);
}
List<StoragePool> suitablePools = new ArrayList<>();
List<StoragePoolVO> storagePools = storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
if (storagePools == null) {
LOGGER.debug(String.format("Could not find any zone wide storage pool that matched with any of the following tags [%s].", Arrays.toString(dskCh.getTags())));
storagePools = new ArrayList<>();
}
@ -97,6 +94,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
LOGGER.trace(String.format("Found suitable local storage pool [%s], adding to list.", storage));
suitablePools.add(storagePool);
} else {
if (canAddStoragePoolToAvoidSet(storage)) {
@ -104,6 +102,8 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
}
}
LOGGER.debug(String.format("ZoneWideStoragePoolAllocator is returning [%s] suitable storage pools [%s].", suitablePools.size(), suitablePools));
return suitablePools;
}
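One small robustness change above: a null result from findZoneWideStoragePoolsByTags is logged once and normalized to an empty list, so the rest of the method needs no null checks. Sketch of the idiom with simplified types:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch: normalize a null DAO result to an empty list after logging it.
    class NullToEmptySketch {
        static List<String> normalize(List<String> fromDao) {
            if (fromDao == null) {
                System.out.println("Could not find any zone-wide storage pool matching the tags.");
                return new ArrayList<>();
            }
            return fromDao;
        }
    }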


@ -2421,7 +2421,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStore.getUuid());
if (morDatastore == null) {
throw new Exception("Unable to find datastore in vSphere");
throw new CloudRuntimeException(String.format("Unable to find datastore [%s] in vSphere.", primaryStore.getUuid()));
}
DatastoreMO dsMo = new DatastoreMO(context, morDatastore);
@ -2439,13 +2439,13 @@ public class VmwareStorageProcessor implements StorageProcessor {
newVol.setPath(file.getFileBaseName());
newVol.setSize(volume.getSize());
} catch (Exception e) {
s_logger.debug("Create disk using vStorageObject manager failed due to exception " + e.getMessage() + ", retying using worker VM");
s_logger.error(String.format("Create disk using vStorageObject manager failed due to [%s], retrying using worker VM.", e.getMessage()), e);
String dummyVmName = hostService.getWorkerName(context, cmd, 0, dsMo);
try {
s_logger.info("Create worker VM " + dummyVmName);
s_logger.info(String.format("Creating worker VM [%s].", dummyVmName));
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName, null);
if (vmMo == null) {
throw new Exception("Unable to create a dummy VM for volume creation");
throw new CloudRuntimeException("Unable to create a dummy VM for volume creation.");
}
synchronized (this) {
@ -2454,9 +2454,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
vmMo.detachDisk(volumeDatastorePath, false);
}
catch (Exception e1) {
s_logger.error("Deleting file " + volumeDatastorePath + " due to error: " + e1.getMessage());
s_logger.error(String.format("Deleting file [%s] due to [%s].", volumeDatastorePath, e1.getMessage()), e1);
VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid, dcMo, VmwareManager.s_vmwareSearchExcludeFolder.value());
throw new CloudRuntimeException("Unable to create volume due to: " + e1.getMessage());
throw new CloudRuntimeException(String.format("Unable to create volume due to [%s].", e1.getMessage()));
}
}
@ -2465,7 +2465,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
newVol.setSize(volume.getSize());
return new CreateObjectAnswer(newVol);
} finally {
s_logger.info("Destroy dummy VM after volume creation");
s_logger.info("Destroying dummy VM after volume creation.");
if (vmMo != null) {
vmMo.detachAllDisksAndDestroy();
}
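The hunk above also shows the worker-VM fallback used for volume creation: try the direct vStorageObject path, and if it throws, create a temporary worker VM, perform the operation through it, and destroy it afterwards no matter what. A generic, hypothetical sketch of that shape; Worker and its methods are illustrative, the real code uses VirtualMachineMO and detachAllDisksAndDestroy():

    import java.util.concurrent.Callable;

    // Sketch: fast path first, then a throwaway worker with guaranteed cleanup.
    class WorkerFallbackSketch {
        interface Worker extends AutoCloseable { void createDisk() throws Exception; }

        static void createVolume(Runnable directPath, Callable<Worker> makeWorker) throws Exception {
            try {
                directPath.run();                          // fast path
            } catch (RuntimeException e) {
                System.out.printf("Direct path failed due to [%s], retrying using worker.%n", e.getMessage());
                try (Worker worker = makeWorker.call()) {  // closed/destroyed even if the retry fails
                    worker.createDisk();
                }
            }
        }
    }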


@ -36,6 +36,7 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
@Override
public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
@ -44,22 +45,22 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
Long clusterId = plan.getClusterId();
if (podId == null) {
s_logger.debug("RandomStoragePoolAllocator is returning null since the pod ID is null. This may be a zone wide storage.");
return null;
}
s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s] and cluster [%s].", dcId, podId, clusterId));
List<StoragePoolVO> pools = storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
if (pools.size() == 0) {
s_logger.debug(String.format("RandomStoragePoolAllocator found no storage pools available for allocation in dc [%s], pod [%s] and cluster [%s]. Returning an empty list.",
dcId, podId, clusterId));
return suitablePools;
}
Collections.shuffle(pools);
s_logger.debug(String.format("RandomStoragePoolAllocator has [%s] pools to check for allocation [%s].", pools.size(), pools));
for (StoragePoolVO pool : pools) {
if (suitablePools.size() == returnUpTo) {
break;
@ -67,13 +68,12 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, pol, dskCh, plan)) {
s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
suitablePools.add(pol);
}
}
s_logger.debug(String.format("RandomStoragePoolAllocator is returning [%s] suitable storage pools [%s].", suitablePools.size(), suitablePools));
return suitablePools;
}
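RandomStoragePoolAllocator's strategy is shuffle-then-filter: randomize the candidate order, then accept pools that pass the suitability filter until returnUpTo is reached. A self-contained sketch (note that shuffle mutates the list it is given):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.function.Predicate;

    // Sketch of shuffle-then-filter selection with an upper bound on results.
    class RandomSelectSketch {
        static List<String> select(List<String> pools, Predicate<String> suitable, int returnUpTo) {
            Collections.shuffle(pools);
            List<String> picked = new ArrayList<>();
            for (String pool : pools) {
                if (picked.size() == returnUpTo) {
                    break;
                }
                if (suitable.test(pool)) {
                    picked.add(pool);
                }
            }
            return picked;
        }
    }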


@ -478,9 +478,7 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost {
@Override
public ManagedObjectReference findDatastore(String poolUuid) throws Exception {
s_logger.trace(String.format("Searching datastore in target MOR [%s] with poolUuid [%s].", _mor.getValue(), poolUuid));
CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, _context.getServiceContent().getCustomFieldsManager());
int key = cfmMo.getCustomFieldKey("Datastore", CustomFieldConstants.CLOUD_UUID);
@ -489,18 +487,17 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost {
ObjectContent[] ocs = getDatastorePropertiesOnHyperHost(new String[] {"name", String.format("value[%d]", key)});
if (ocs != null) {
for (ObjectContent oc : ocs) {
if (oc.getPropSet().get(0).getVal().equals(poolUuid)) {
s_logger.trace(String.format("Found datastore [%s] in target MOR [%s].", oc.getObj(), _mor.getValue()));
return oc.getObj();
}
if (oc.getPropSet().size() > 1) {
DynamicProperty prop = oc.getPropSet().get(1);
if (prop != null && prop.getVal() != null) {
if (prop.getVal() instanceof CustomFieldStringValue) {
String val = ((CustomFieldStringValue)prop.getVal()).getValue();
if (val.equalsIgnoreCase(poolUuid)) {
s_logger.trace(String.format("Found datastore [%s] in target MOR [%s].", oc.getObj(), _mor.getValue()));
return oc.getObj();
}
}
@ -509,8 +506,7 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost {
}
}
s_logger.trace(String.format("Failed to find a datastore with UUID [%s].", poolUuid));
return null;
}
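For orientation, findDatastore is a two-key lookup: each datastore is matched first on its name property and then on the CLOUD_UUID custom field, and the first hit wins. A simplified standalone model; plain maps stand in for vim25 ObjectContent/DynamicProperty structures:

    import java.util.List;
    import java.util.Map;
    import java.util.Optional;

    // Sketch: match by name, then by the cloud UUID custom field.
    class DatastoreLookupSketch {
        static Optional<String> find(List<Map<String, String>> datastores, String poolUuid) {
            for (Map<String, String> ds : datastores) {
                if (poolUuid.equals(ds.get("name"))
                        || poolUuid.equalsIgnoreCase(ds.getOrDefault("cloud.uuid", ""))) {
                    return Optional.of(ds.get("moref"));   // managed object reference id
                }
            }
            return Optional.empty();
        }
    }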


@ -2303,7 +2303,7 @@ public class HypervisorHostHelper {
String hiddenFolderPath = String.format("%s/%s", folderPath, VSPHERE_DATASTORE_HIDDEN_FOLDER);
if (!dsMo.folderExists(dsPath, VSPHERE_DATASTORE_BASE_FOLDER)) {
s_logger.info(String.format("vSphere datastore base folder: %s does not exist, now creating on datastore: %s", VSPHERE_DATASTORE_BASE_FOLDER, dsMo.getName()));
s_logger.info(String.format("vSphere datastore base folder [%s] does not exist on datastore [%s]. We will create it.", VSPHERE_DATASTORE_BASE_FOLDER, dsMo.getName()));
dsMo.makeDirectory(folderPath, mor);
// Adding another directory so vCentre doesn't remove the fcd directory when it's empty
dsMo.makeDirectory(hiddenFolderPath, mor);


@ -43,6 +43,7 @@ import com.vmware.vim25.RuntimeFaultFaultMsg;
import com.vmware.vim25.TaskInfo;
import com.vmware.vim25.TaskInfoState;
import com.vmware.vim25.VirtualMachineTicket;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
@ -675,7 +676,7 @@ public class VirtualMachineMO extends BaseMO {
byte[] content = getContext().getResourceContent(url);
if (content == null || content.length < 1) {
s_logger.warn("Snapshot descriptor file (vsd) does not exist anymore?");
s_logger.warn("Snapshot descriptor file (vsd) was not found.");
}
SnapshotDescriptor descriptor = new SnapshotDescriptor();
@ -1250,11 +1251,8 @@ public class VirtualMachineMO extends BaseMO {
// vmdkDatastorePath: [datastore name] vmdkFilePath
public void createDisk(String vmdkDatastorePath, VirtualDiskType diskType, VirtualDiskMode diskMode, String rdmDeviceName, long sizeInMb,
ManagedObjectReference morDs, int controllerKey, String vSphereStoragePolicyId) throws Exception {
s_logger.trace(String.format("Creating disk in target MOR [%s] with values: vmdkDatastorePath [%s], sizeInMb [%s], diskType [%s], diskMode [%s], rdmDeviceName [%s]"
+ ", datastore [%s], controllerKey [%s].", _mor.getValue(), vmdkDatastorePath, sizeInMb, diskType, diskMode, rdmDeviceName, morDs.getValue(), controllerKey));
assert (vmdkDatastorePath != null);
assert (morDs != null);
@ -1283,6 +1281,8 @@ public class VirtualMachineMO extends BaseMO {
backingInfo.setDatastore(morDs);
backingInfo.setFileName(vmdkDatastorePath);
s_logger.trace(String.format("Created backing info with values [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields("diskMode", "thinProvisioned",
"eagerlyScrub", "datastore", "filename")));
newDisk.setBacking(backingInfo);
} else if (diskType == VirtualDiskType.RDM || diskType == VirtualDiskType.RDMP) {
VirtualDiskRawDiskMappingVer1BackingInfo backingInfo = new VirtualDiskRawDiskMappingVer1BackingInfo();
@ -1298,6 +1298,8 @@ public class VirtualMachineMO extends BaseMO {
backingInfo.setDatastore(morDs);
backingInfo.setFileName(vmdkDatastorePath);
s_logger.trace(String.format("Created backing info with values [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields("compatibilityMode", "deviceName",
"diskMode", "datastore", "filename")));
newDisk.setBacking(backingInfo);
}
@ -1318,9 +1320,7 @@ public class VirtualMachineMO extends BaseMO {
PbmProfileManagerMO profMgrMo = new PbmProfileManagerMO(getContext());
VirtualMachineDefinedProfileSpec diskProfileSpec = profMgrMo.getProfileSpec(vSphereStoragePolicyId);
deviceConfigSpec.getProfile().add(diskProfileSpec);
s_logger.debug(String.format("Adding vSphere storage profile [%s] to volume [%s].", vSphereStoragePolicyId, vmdkDatastorePath));
}
reConfigSpec.getDeviceChange().add(deviceConfigSpec);
@ -1328,15 +1328,10 @@ public class VirtualMachineMO extends BaseMO {
boolean result = _context.getVimClient().waitForTask(morTask);
if (!result) {
throw new CloudRuntimeException(String.format("Unable to create disk [%s] due to [%s].", vmdkDatastorePath, TaskMO.getTaskFailureInfo(_context, morTask)));
}
_context.waitForTaskProgressDone(morTask);
}
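The new backing-info trace lines rely on ReflectionToStringBuilderUtils; judging from the call with disabledPools earlier in this commit, the assumed signature is reflectOnlySelectedFields(Object, String... fields), with the object to render first. A hedged sketch of the intended call shape inside createDisk; backingInfo is the backing object built above, and the field names are taken verbatim from the diff:

    // Assumed call shape, not verified against the utility's source:
    String rendered = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
            backingInfo, "diskMode", "thinProvisioned", "eagerlyScrub", "datastore", "filename");
    s_logger.trace(String.format("Created backing info with values [%s].", rendered));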
public void updateVmdkAdapter(String vmdkFileName, String diskController) throws Exception {
@ -1484,22 +1479,19 @@ public class VirtualMachineMO extends BaseMO {
// vmdkDatastorePath: [datastore name] vmdkFilePath
public List<Pair<String, ManagedObjectReference>> detachDisk(String vmdkDatastorePath, boolean deleteBackingFile) throws Exception {
s_logger.trace(String.format("Detaching disk in target MOR [%s], with vmdkDatastorePath [%s] and deleteBacking [%s].", _mor.getValue(), vmdkDatastorePath, deleteBackingFile));
// Note: if a snapshot has been taken of the VM, the original backing file will be renamed; therefore, when we try to find the matching
// VirtualDisk, we only perform prefix matching
Pair<VirtualDisk, String> deviceInfo = getDiskDevice(vmdkDatastorePath);
if (deviceInfo == null) {
s_logger.warn("vCenter API trace - detachDisk() done (failed)");
throw new Exception("No such disk device: " + vmdkDatastorePath);
throw new CloudRuntimeException(String.format("No such disk device [%s].", vmdkDatastorePath));
}
// IDE virtual disk cannot be detached if VM is running
if (deviceInfo.second() != null && deviceInfo.second().contains("ide")) {
if (getPowerState() == VirtualMachinePowerState.POWERED_ON) {
throw new Exception("Removing a virtual disk over IDE controller is not supported while VM is running in VMware hypervisor. " +
throw new CloudRuntimeException("Removing a virtual disk over IDE controller is not supported while the VM is running in VMware hypervisor. " +
"Please re-try when VM is not running.");
}
}
@ -1521,10 +1513,7 @@ public class VirtualMachineMO extends BaseMO {
boolean result = _context.getVimClient().waitForTask(morTask);
if (!result) {
throw new CloudRuntimeException(String.format("Failed to detach disk from VM [%s] due to [%s].", getVmName(), TaskMO.getTaskFailureInfo(_context, morTask)));
}
_context.waitForTaskProgressDone(morTask);
@ -1533,7 +1522,7 @@ public class VirtualMachineMO extends BaseMO {
try {
snapshotDescriptor = getSnapshotDescriptor();
} catch (Exception e) {
s_logger.info("Unable to retrieve snapshot descriptor, will skip updating snapshot reference");
s_logger.warn(String.format("Unable to retrieve snapshot descriptor due to [%s], we will not update the snapshot reference.", e.getMessage()), e);
}
if (snapshotDescriptor != null) {
@ -1549,8 +1538,6 @@ public class VirtualMachineMO extends BaseMO {
getContext().uploadResourceContent(url, snapshotDescriptor.getVmsdContent());
}
return chain;
}
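The comment in detachDisk above explains why disk lookups use prefix matching: taking a snapshot renames the original backing file. A small sketch of that matching rule; the "-NNNNNN" delta postfix format is an assumption here rather than taken from VmwareHelper:

    // Sketch: exact match first, then prefix match on the trimmed base name.
    class PrefixMatchSketch {
        static boolean matchesBase(String backingBaseName, String requestedBaseName) {
            String trimmed = requestedBaseName.replaceAll("-\\d{6}$", "");
            return backingBaseName.equalsIgnoreCase(requestedBaseName)
                    || backingBaseName.startsWith(trimmed);
        }
    }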
@ -2602,12 +2589,13 @@ public class VirtualMachineMO extends BaseMO {
String trimmedSrcBaseName = VmwareHelper.trimSnapshotDeltaPostfix(srcBaseName);
String srcDatastoreName = dsSrcFile.getDatastoreName() != null ? dsSrcFile.getDatastoreName() : zeroLengthString;
s_logger.info("Look for disk device info for volume : " + vmdkDatastorePath + " with base name: " + srcBaseName);
s_logger.info(String.format("Looking for disk device info for volume [%s] with base name [%s].", vmdkDatastorePath, srcBaseName));
if (devices != null && devices.size() > 0) {
for (VirtualDevice device : devices) {
if (device instanceof VirtualDisk) {
s_logger.info("Test against disk device, controller key: " + device.getControllerKey() + ", unit number: " + device.getUnitNumber());
s_logger.info(String.format("Testing if disk device with controller key [%s] and unit number [%s] has backing of type VirtualDiskFlatVer2BackingInfo.",
device.getControllerKey(), device.getUnitNumber()));
VirtualDeviceBackingInfo backingInfo = device.getBacking();
@ -2615,12 +2603,12 @@ public class VirtualMachineMO extends BaseMO {
VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo;
do {
s_logger.info("Test against disk backing : " + diskBackingInfo.getFileName());
DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName());
String backingDatastoreName = dsBackingFile.getDatastoreName() != null ? dsBackingFile.getDatastoreName() : zeroLengthString;
s_logger.info(String.format("Testing if backing datastore name [%s] from backing [%s] matches source datastore name [%s].", backingDatastoreName, diskBackingInfo.getFileName(), srcDatastoreName));
if (srcDatastoreName.equals(zeroLengthString)) {
backingDatastoreName = zeroLengthString;
}
@ -2631,7 +2619,7 @@ public class VirtualMachineMO extends BaseMO {
if (backingBaseName.equalsIgnoreCase(srcBaseName)) {
String deviceNumbering = getDeviceBusName(devices, device);
s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering);
s_logger.info(String.format("Disk backing [%s] matches device bus name [%s].", diskBackingInfo.getFileName(), deviceNumbering));
return new Pair<>((VirtualDisk)device, deviceNumbering);
}
@ -2650,24 +2638,25 @@ public class VirtualMachineMO extends BaseMO {
}
// No disk device was found with an exact match for the volume path, hence look for disk device that matches the trimmed name.
s_logger.info("No disk device with an exact match found for volume : " + vmdkDatastorePath + ". Look for disk device info against trimmed base name: " + srcBaseName);
s_logger.info(String.format("No disk device exactly matching [%s] was found for volume [%s]. Looking for disk device info against trimmed base name [%s].", srcBaseName,
vmdkDatastorePath, srcBaseName));
if (partialMatchingDiskDevices != null) {
if (partialMatchingDiskDevices.size() == 1) {
VirtualDiskFlatVer2BackingInfo matchingDiskBackingInfo = (VirtualDiskFlatVer2BackingInfo)partialMatchingDiskDevices.get(0).first().getBacking();
s_logger.info("Disk backing : " + matchingDiskBackingInfo.getFileName() + " matches ==> " + partialMatchingDiskDevices.get(0).second());
s_logger.info(String.format("Disk backing [%s] matches [%s].", matchingDiskBackingInfo.getFileName(), partialMatchingDiskDevices.get(0).second()));
return partialMatchingDiskDevices.get(0);
} else if (partialMatchingDiskDevices.size() > 1) {
s_logger.warn("Disk device info lookup for volume: " + vmdkDatastorePath + " failed as multiple disk devices were found to match" +
" volume's trimmed base name: " + trimmedSrcBaseName);
s_logger.warn(String.format("Disk device info lookup for volume [%s] failed as multiple disk devices were found to match volume's trimmed base name [%s].",
vmdkDatastorePath, trimmedSrcBaseName));
return null;
}
}
s_logger.warn("Disk device info lookup for volume: " + vmdkDatastorePath + " failed as no matching disk device found");
s_logger.warn(String.format("Disk device info lookup for volume [%s] failed as no matching disk device was found.", vmdkDatastorePath));
return null;
}