Merge release branch 4.13 to master

* 4.13:
  Snapshot deletion issues (#3969)
  server: Cannot list affinity group if there are hosts dedicated… (#4025)
  server: Search zone-wide storage pool when allocation algorithm is firstfitleastconsumed (#4002)
Daan Hoogland 2020-04-11 16:45:00 +02:00
commit b984184b7a
11 changed files with 228 additions and 94 deletions


@@ -56,5 +56,5 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {
 
     float findClusterConsumption(Long clusterId, short capacityType, long computeRequested);
 
-    List<Long> orderHostsByFreeCapacity(Long clusterId, short capacityType);
+    List<Long> orderHostsByFreeCapacity(Long zoneId, Long clusterId, short capacityType);
 }


@@ -903,20 +903,28 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
     }
 
     @Override
-    public List<Long> orderHostsByFreeCapacity(Long clusterId, short capacityTypeForOrdering){
+    public List<Long> orderHostsByFreeCapacity(Long zoneId, Long clusterId, short capacityTypeForOrdering){
         TransactionLegacy txn = TransactionLegacy.currentTxn();
         PreparedStatement pstmt = null;
         List<Long> result = new ArrayList<Long>();
         StringBuilder sql = new StringBuilder(ORDER_HOSTS_BY_FREE_CAPACITY_PART1);
-        if(clusterId != null) {
-            sql.append("AND cluster_id = ?");
+        if (zoneId != null) {
+            sql.append(" AND data_center_id = ?");
+        }
+        if (clusterId != null) {
+            sql.append(" AND cluster_id = ?");
         }
         sql.append(ORDER_HOSTS_BY_FREE_CAPACITY_PART2);
         try {
             pstmt = txn.prepareAutoCloseStatement(sql.toString());
             pstmt.setShort(1, capacityTypeForOrdering);
-            if(clusterId != null) {
-                pstmt.setLong(2, clusterId);
+            int index = 2;
+            if (zoneId != null) {
+                pstmt.setLong(index, zoneId);
+                index ++;
+            }
+            if (clusterId != null) {
+                pstmt.setLong(index, clusterId);
             }
             ResultSet rs = pstmt.executeQuery();
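
Note on the new signature: callers can now scope the free-capacity ordering to a cluster, to a whole zone, or to both. A minimal usage sketch (hypothetical variable names, assuming an injected CapacityDao and a DeploymentPlan in scope), mirroring how the allocators further down in this commit call it:

    // cluster-scoped ordering, as used by FirstFitAllocator and AbstractStoragePoolAllocator
    List<Long> hostIdsByFreeCapacity = capacityDao.orderHostsByFreeCapacity(plan.getDataCenterId(), plan.getClusterId(), capacityType);
    // zone-wide ordering, as used by ZoneWideStoragePoolAllocator: clusterId is passed as null
    List<Long> zoneWidePoolIds = capacityDao.orderHostsByFreeCapacity(plan.getDataCenterId(), null, capacityType);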


@@ -49,7 +49,7 @@
     <bean id="dataStoreProviderManagerImpl" class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl" />
     <bean id="storageCacheManagerImpl" class="org.apache.cloudstack.storage.cache.manager.StorageCacheManagerImpl" />
     <bean id="storageCacheRandomAllocator" class="org.apache.cloudstack.storage.cache.allocator.StorageCacheRandomAllocator" />
-    <bean id="xenserverSnapshotStrategy" class="org.apache.cloudstack.storage.snapshot.XenserverSnapshotStrategy" />
+    <bean id="defaultSnapshotStrategy" class="org.apache.cloudstack.storage.snapshot.DefaultSnapshotStrategy" />
     <bean id="bAREMETAL" class="org.apache.cloudstack.storage.image.format.BAREMETAL" />
     <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl" />
     <bean id="dataObjectManagerImpl" class="org.apache.cloudstack.storage.datastore.DataObjectManagerImpl" />


@@ -50,7 +50,7 @@
     <bean id="ancientDataMotionStrategy" class="org.apache.cloudstack.storage.motion.AncientDataMotionStrategy" />
     <bean id="storageCacheManagerImpl" class="org.apache.cloudstack.storage.cache.manager.StorageCacheManagerImpl" />
     <bean id="storageCacheRandomAllocator" class="org.apache.cloudstack.storage.cache.allocator.StorageCacheRandomAllocator" />
-    <bean id="xenserverSnapshotStrategy" class="org.apache.cloudstack.storage.snapshot.XenserverSnapshotStrategy" />
+    <bean id="defaultSnapshotStrategy" class="org.apache.cloudstack.storage.snapshot.DefaultSnapshotStrategy" />
    <bean id="bAREMETAL" class="org.apache.cloudstack.storage.image.format.BAREMETAL" />
     <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl" />
     <bean id="dataObjectManagerImpl" class="org.apache.cloudstack.storage.datastore.DataObjectManagerImpl" />


@ -21,7 +21,6 @@ import java.util.List;
import javax.inject.Inject; import javax.inject.Inject;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
@@ -34,7 +33,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.framework.jobs.AsyncJob;
-import org.apache.cloudstack.framework.jobs.dao.SyncQueueItemDao;
 import org.apache.cloudstack.storage.command.CreateObjectAnswer;
 import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
@@ -71,9 +69,8 @@ import com.cloud.utils.db.DB;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.fsm.NoTransitionException;
 
-@Component
-public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
-    private static final Logger s_logger = Logger.getLogger(XenserverSnapshotStrategy.class);
+public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
+    private static final Logger s_logger = Logger.getLogger(DefaultSnapshotStrategy.class);
 
     @Inject
     SnapshotService snapshotSvr;
@@ -90,12 +87,8 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
     @Inject
     SnapshotDataFactory snapshotDataFactory;
     @Inject
-    private SnapshotDao _snapshotDao;
-    @Inject
     private SnapshotDetailsDao _snapshotDetailsDao;
     @Inject
-    private SyncQueueItemDao _syncQueueItemDao;
-    @Inject
     VolumeDetailsDao _volumeDetailsDaoImpl;
 
     @Override
@@ -264,68 +257,147 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
             return true;
         }
 
-        if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Error.equals(snapshotVO.getState()) &&
+        if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) &&
             !Snapshot.State.Destroying.equals(snapshotVO.getState())) {
             throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is in " + snapshotVO.getState() + " Status");
         }
 
-        // first mark the snapshot as destroyed, so that ui can't see it, but we
-        // may not destroy the snapshot on the storage, as other snapshots may
-        // depend on it.
-        SnapshotInfo snapshotOnImage = snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Image);
-        if (snapshotOnImage == null) {
-            s_logger.debug("Can't find snapshot on backup storage, delete it in db");
-            snapshotDao.remove(snapshotId);
-            return true;
-        }
-
-        SnapshotObject obj = (SnapshotObject)snapshotOnImage;
-        try {
-            obj.processEvent(Snapshot.Event.DestroyRequested);
-            List<VolumeDetailVO> volumesFromSnapshot;
-            volumesFromSnapshot = _volumeDetailsDaoImpl.findDetails("SNAPSHOT_ID", String.valueOf(snapshotId), null);
-
-            if (volumesFromSnapshot.size() > 0) {
-                try {
-                    obj.processEvent(Snapshot.Event.OperationFailed);
-                } catch (NoTransitionException e1) {
-                    s_logger.debug("Failed to change snapshot state: " + e1.toString());
-                }
-                throw new InvalidParameterValueException("Unable to perform delete operation, Snapshot with id: " + snapshotId + " is in use ");
-            }
-        } catch (NoTransitionException e) {
-            s_logger.debug("Failed to set the state to destroying: ", e);
-            return false;
-        }
-
-        try {
-            boolean result = deleteSnapshotChain(snapshotOnImage);
-            obj.processEvent(Snapshot.Event.OperationSucceeded);
-            if (result) {
-                //snapshot is deleted on backup storage, need to delete it on primary storage
-                SnapshotDataStoreVO snapshotOnPrimary = snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Primary);
-                if (snapshotOnPrimary != null) {
-                    SnapshotInfo snapshotOnPrimaryInfo = snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Primary);
-                    long volumeId = snapshotOnPrimary.getVolumeId();
-                    VolumeVO volumeVO = volumeDao.findById(volumeId);
-                    if (((PrimaryDataStoreImpl)snapshotOnPrimaryInfo.getDataStore()).getPoolType() == StoragePoolType.RBD && volumeVO != null) {
-                        snapshotSvr.deleteSnapshot(snapshotOnPrimaryInfo);
-                    }
-                    snapshotOnPrimary.setState(State.Destroyed);
-                    snapshotStoreDao.update(snapshotOnPrimary.getId(), snapshotOnPrimary);
-                }
-            }
-        } catch (Exception e) {
-            s_logger.debug("Failed to delete snapshot: ", e);
-            try {
-                obj.processEvent(Snapshot.Event.OperationFailed);
-            } catch (NoTransitionException e1) {
-                s_logger.debug("Failed to change snapshot state: " + e.toString());
-            }
-            return false;
-        }
-
-        return true;
+        Boolean deletedOnSecondary = deleteOnSecondaryIfNeeded(snapshotId);
+        boolean deletedOnPrimary = deleteOnPrimaryIfNeeded(snapshotId);
+        if (deletedOnPrimary) {
+            s_logger.debug(String.format("Successfully deleted snapshot (id: %d) on primary storage.", snapshotId));
+        } else {
+            s_logger.debug(String.format("The snapshot (id: %d) could not be found/deleted on primary storage.", snapshotId));
+        }
+        if (null != deletedOnSecondary && deletedOnSecondary) {
+            s_logger.debug(String.format("Successfully deleted snapshot (id: %d) on secondary storage.", snapshotId));
+        }
+        return (deletedOnSecondary != null) && deletedOnSecondary || deletedOnPrimary;
+    }
+
+    private boolean deleteOnPrimaryIfNeeded(Long snapshotId) {
+        SnapshotVO snapshotVO;
+        boolean deletedOnPrimary = false;
+        snapshotVO = snapshotDao.findById(snapshotId);
+        SnapshotInfo snapshotOnPrimaryInfo = snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Primary);
+        if (snapshotVO != null && snapshotVO.getState() == Snapshot.State.Destroyed) {
+            deletedOnPrimary = deleteSnapshotOnPrimary(snapshotId, snapshotOnPrimaryInfo);
+        } else {
+            // Here we handle snapshots which are to be deleted but are not marked as destroyed yet.
+            // This may occur for instance when they are created only on primary because
+            // snapshot.backup.to.secondary was set to false.
+            if (snapshotOnPrimaryInfo == null) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug(String.format("Snapshot (id: %d) not found on primary storage, skipping snapshot deletion on primary and marking it destroyed", snapshotId));
+                }
+                snapshotVO.setState(Snapshot.State.Destroyed);
+                snapshotDao.update(snapshotId, snapshotVO);
+                deletedOnPrimary = true;
+            } else {
+                SnapshotObject obj = (SnapshotObject) snapshotOnPrimaryInfo;
+                try {
+                    obj.processEvent(Snapshot.Event.DestroyRequested);
+                    deletedOnPrimary = deleteSnapshotOnPrimary(snapshotId, snapshotOnPrimaryInfo);
+                    if (!deletedOnPrimary) {
+                        obj.processEvent(Snapshot.Event.OperationFailed);
+                    } else {
+                        obj.processEvent(Snapshot.Event.OperationSucceeded);
+                    }
+                } catch (NoTransitionException e) {
+                    s_logger.debug("Failed to set the state to destroying: ", e);
+                    deletedOnPrimary = false;
+                }
+            }
+        }
+        return deletedOnPrimary;
+    }
+
+    private Boolean deleteOnSecondaryIfNeeded(Long snapshotId) {
+        Boolean deletedOnSecondary = null;
+        SnapshotInfo snapshotOnImage = snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Image);
+        if (snapshotOnImage == null) {
+            s_logger.debug(String.format("Can't find snapshot [snapshot id: %d] on secondary storage", snapshotId));
+        } else {
+            SnapshotObject obj = (SnapshotObject)snapshotOnImage;
+            try {
+                deletedOnSecondary = deleteSnapshotOnSecondaryStorage(snapshotId, snapshotOnImage, obj);
+                if (!deletedOnSecondary) {
+                    s_logger.debug(
+                            String.format("Failed to find/delete snapshot (id: %d) on secondary storage. Still necessary to check and delete snapshot on primary storage.",
+                                    snapshotId));
+                } else {
+                    s_logger.debug(String.format("Snapshot (id: %d) has been deleted on secondary storage.", snapshotId));
+                }
+            } catch (NoTransitionException e) {
+                s_logger.debug("Failed to set the state to destroying: ", e);
+                // deletedOnSecondary remain null
+            }
+        }
+        return deletedOnSecondary;
+    }
+
+    /**
+     * Deletes the snapshot on secondary storage.
+     * It can return false when the snapshot was stored on primary storage and not backed up on secondary; therefore, the snapshot should also be deleted on primary storage even when this method returns false.
+     */
+    private boolean deleteSnapshotOnSecondaryStorage(Long snapshotId, SnapshotInfo snapshotOnImage, SnapshotObject obj) throws NoTransitionException {
+        obj.processEvent(Snapshot.Event.DestroyRequested);
+        List<VolumeDetailVO> volumesFromSnapshot;
+        volumesFromSnapshot = _volumeDetailsDaoImpl.findDetails("SNAPSHOT_ID", String.valueOf(snapshotId), null);
+
+        if (volumesFromSnapshot.size() > 0) {
+            try {
+                obj.processEvent(Snapshot.Event.OperationFailed);
+            } catch (NoTransitionException e1) {
+                s_logger.debug("Failed to change snapshot state: " + e1.toString());
+            }
+            throw new InvalidParameterValueException("Unable to perform delete operation, Snapshot with id: " + snapshotId + " is in use ");
+        }
+
+        boolean result = deleteSnapshotChain(snapshotOnImage);
+        obj.processEvent(Snapshot.Event.OperationSucceeded);
+        return result;
+    }
+
+    /**
+     * Deletes the snapshot on primary storage. It returns true when the snapshot was not found on primary storage; </br>
+     * In case of failure while deleting the snapshot, it will throw one of the following exceptions: CloudRuntimeException, InterruptedException, or ExecutionException. </br>
+     */
+    private boolean deleteSnapshotOnPrimary(Long snapshotId, SnapshotInfo snapshotOnPrimaryInfo) {
+        SnapshotDataStoreVO snapshotOnPrimary = snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Primary);
+        if (isSnapshotOnPrimaryStorage(snapshotId)) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug(String.format("Snapshot reference is found on primary storage for snapshot id: %d, performing snapshot deletion on primary", snapshotId));
+            }
+            if (snapshotSvr.deleteSnapshot(snapshotOnPrimaryInfo)) {
+                snapshotOnPrimary.setState(State.Destroyed);
+                snapshotStoreDao.update(snapshotOnPrimary.getId(), snapshotOnPrimary);
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug(String.format("Successfully deleted snapshot id: %d on primary storage", snapshotId));
+                }
+                return true;
+            }
+        } else {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug(String.format("Snapshot reference is not found on primary storage for snapshot id: %d, skipping snapshot deletion on primary", snapshotId));
+            }
+            return true;
+        }
+        return false;
+    }
+
+    /**
+     * Returns true if the snapshot volume is on primary storage.
+     */
+    private boolean isSnapshotOnPrimaryStorage(long snapshotId) {
+        SnapshotDataStoreVO snapshotOnPrimary = snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Primary);
+        if (snapshotOnPrimary != null) {
+            long volumeId = snapshotOnPrimary.getVolumeId();
+            VolumeVO volumeVO = volumeDao.findById(volumeId);
+            return volumeVO != null && volumeVO.getRemoved() == null;
+        }
+        return false;
     }
 
     @Override
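
A note on the combined result in the rewritten deleteSnapshot(): in Java, && binds tighter than ||, so the return statement above is equivalent to the fully parenthesised form below. In other words, the delete is reported as successful if it succeeded on secondary storage or on primary storage (equivalence sketch only, not part of the commit):

    return ((deletedOnSecondary != null) && deletedOnSecondary) || deletedOnPrimary;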


@@ -27,8 +27,8 @@
                       http://www.springframework.org/schema/context/spring-context.xsd"
                       >
 
-    <bean id="xenserverSnapshotStrategy"
-          class="org.apache.cloudstack.storage.snapshot.XenserverSnapshotStrategy" />
+    <bean id="defaultSnapshotStrategy"
+          class="org.apache.cloudstack.storage.snapshot.DefaultSnapshotStrategy" />
 
     <bean id="storageSystemSnapshotStrategy"
           class="org.apache.cloudstack.storage.snapshot.StorageSystemSnapshotStrategy" />


@@ -94,6 +94,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
 
     protected List<StoragePool> reorderPoolsByCapacity(DeploymentPlan plan,
                                                        List<StoragePool> pools) {
+        Long zoneId = plan.getDataCenterId();
         Long clusterId = plan.getClusterId();
         short capacityType;
         if(pools != null && pools.size() != 0){
@@ -102,7 +103,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
             return null;
         }
 
-        List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(clusterId, capacityType);
+        List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("List of pools in descending order of free capacity: "+ poolIdsByCapacity);
         }


@@ -29,6 +29,8 @@ import org.springframework.stereotype.Component;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 
+import com.cloud.capacity.Capacity;
+import com.cloud.capacity.dao.CapacityDao;
 import com.cloud.deploy.DeploymentPlan;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
@@ -43,6 +45,8 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
     private static final Logger LOGGER = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
     @Inject
     private DataStoreManager dataStoreMgr;
+    @Inject
+    private CapacityDao capacityDao;
 
     @Override
@@ -110,6 +114,40 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
         return !ScopeType.ZONE.equals(storagePoolVO.getScope()) || !storagePoolVO.isManaged();
     }
 
+    @Override
+    protected List<StoragePool> reorderPoolsByCapacity(DeploymentPlan plan,
+                                                       List<StoragePool> pools) {
+        Long zoneId = plan.getDataCenterId();
+        short capacityType;
+        if(pools != null && pools.size() != 0){
+            capacityType = pools.get(0).getPoolType().isShared() ? Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED : Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
+        } else{
+            return null;
+        }
+
+        List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, null, capacityType);
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("List of zone-wide storage pools in descending order of free capacity: "+ poolIdsByCapacity);
+        }
+
+        //now filter the given list of Pools by this ordered list
+        Map<Long, StoragePool> poolMap = new HashMap<>();
+        for (StoragePool pool : pools) {
+            poolMap.put(pool.getId(), pool);
+        }
+        List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
+
+        poolIdsByCapacity.retainAll(matchingPoolIds);
+
+        List<StoragePool> reorderedPools = new ArrayList<>();
+        for(Long id: poolIdsByCapacity){
+            reorderedPools.add(poolMap.get(id));
+        }
+
+        return reorderedPools;
+    }
+
     @Override
     protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List<StoragePool> pools, Account account) {
         if (account == null) {
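
The retainAll() call in the new override is what keeps the zone-wide pools ordered by free capacity while restricting them to the candidate list handed to the allocator: List.retainAll preserves the receiver's own ordering. A minimal illustration with made-up values (assumes java.util imports; not CloudStack code):

    List<Long> byFreeCapacity = new ArrayList<>(Arrays.asList(5L, 3L, 9L, 1L)); // pool ids ordered by free capacity
    List<Long> candidatePoolIds = Arrays.asList(1L, 9L, 3L);                    // pools offered to the allocator
    byFreeCapacity.retainAll(candidatePoolIds);                                 // -> [3, 9, 1], capacity order kept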


@@ -128,6 +128,14 @@ public class KVMStorageProcessor implements StorageProcessor {
     private String _manageSnapshotPath;
     private int _cmdsTimeout;
 
+    private static final String MANAGE_SNAPSTHOT_CREATE_OPTION = "-c";
+    private static final String MANAGE_SNAPSTHOT_DESTROY_OPTION = "-d";
+    private static final String NAME_OPTION = "-n";
+    private static final String CEPH_MON_HOST = "mon_host";
+    private static final String CEPH_AUTH_KEY = "key";
+    private static final String CEPH_CLIENT_MOUNT_TIMEOUT = "client_mount_timeout";
+    private static final String CEPH_DEFAULT_MOUNT_TIMEOUT = "30";
+
     public KVMStorageProcessor(final KVMStoragePoolManager storagePoolMgr, final LibvirtComputingResource resource) {
         this.storagePoolMgr = storagePoolMgr;
         this.resource = resource;
@@ -563,7 +571,7 @@ public class KVMStorageProcessor implements StorageProcessor {
         final Script command = new Script(_createTmplPath, wait, s_logger);
         command.add("-f", disk.getPath());
         command.add("-t", tmpltPath);
-        command.add("-n", templateName + ".qcow2");
+        command.add(NAME_OPTION, templateName + ".qcow2");
 
         final String result = command.execute();
@@ -949,7 +957,7 @@ public class KVMStorageProcessor implements StorageProcessor {
             } else {
                 final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), s_logger);
                 command.add("-b", snapshotDisk.getPath());
-                command.add("-n", snapshotName);
+                command.add(NAME_OPTION, snapshotName);
                 command.add("-p", snapshotDestPath);
                 if (isCreatedFromVmSnapshot) {
                     descName = UUID.randomUUID().toString();
@@ -1010,14 +1018,7 @@ public class KVMStorageProcessor implements StorageProcessor {
                     }
                 } else {
                     if (primaryPool.getType() != StoragePoolType.RBD) {
-                        final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger);
-                        command.add("-d", snapshotDisk.getPath());
-                        command.add("-n", snapshotName);
-                        final String result = command.execute();
-                        if (result != null) {
-                            s_logger.debug("Failed to delete snapshot on primary: " + result);
-                            // return new CopyCmdAnswer("Failed to backup snapshot: " + result);
-                        }
+                        deleteSnapshotViaManageSnapshotScript(snapshotName, snapshotDisk);
                     }
                 }
             } catch (final Exception ex) {
@@ -1035,6 +1036,16 @@ public class KVMStorageProcessor implements StorageProcessor {
         }
     }
 
+    private void deleteSnapshotViaManageSnapshotScript(final String snapshotName, KVMPhysicalDisk snapshotDisk) {
+        final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger);
+        command.add(MANAGE_SNAPSTHOT_DESTROY_OPTION, snapshotDisk.getPath());
+        command.add(NAME_OPTION, snapshotName);
+        final String result = command.execute();
+        if (result != null) {
+            s_logger.debug("Failed to delete snapshot on primary: " + result);
+        }
+    }
+
     protected synchronized String attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach) throws LibvirtException, URISyntaxException,
             InternalErrorException {
         String isoXml = null;
@@ -1494,12 +1505,7 @@ public class KVMStorageProcessor implements StorageProcessor {
              */
             if (primaryPool.getType() == StoragePoolType.RBD) {
                 try {
-                    final Rados r = new Rados(primaryPool.getAuthUserName());
-                    r.confSet("mon_host", primaryPool.getSourceHost() + ":" + primaryPool.getSourcePort());
-                    r.confSet("key", primaryPool.getAuthSecret());
-                    r.confSet("client_mount_timeout", "30");
-                    r.connect();
-                    s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
+                    Rados r = radosConnect(primaryPool);
 
                     final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir());
                     final Rbd rbd = new Rbd(io);
@@ -1516,8 +1522,8 @@ public class KVMStorageProcessor implements StorageProcessor {
             } else {
                 /* VM is not running, create a snapshot by ourself */
                 final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger);
-                command.add("-c", disk.getPath());
-                command.add("-n", snapshotName);
+                command.add(MANAGE_SNAPSTHOT_CREATE_OPTION, disk.getPath());
+                command.add(NAME_OPTION, snapshotName);
                 final String result = command.execute();
                 if (result != null) {
                     s_logger.debug("Failed to manage snapshot: " + result);
@@ -1536,6 +1542,16 @@ public class KVMStorageProcessor implements StorageProcessor {
         }
     }
 
+    private Rados radosConnect(final KVMStoragePool primaryPool) throws RadosException {
+        Rados r = new Rados(primaryPool.getAuthUserName());
+        r.confSet(CEPH_MON_HOST, primaryPool.getSourceHost() + ":" + primaryPool.getSourcePort());
+        r.confSet(CEPH_AUTH_KEY, primaryPool.getAuthSecret());
+        r.confSet(CEPH_CLIENT_MOUNT_TIMEOUT, CEPH_DEFAULT_MOUNT_TIMEOUT);
+        r.connect();
+        s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet(CEPH_MON_HOST));
+        return r;
+    }
+
     @Override
     public Answer deleteVolume(final DeleteCommand cmd) {
         final VolumeObjectTO vol = (VolumeObjectTO)cmd.getData();
@@ -1624,12 +1640,7 @@ public class KVMStorageProcessor implements StorageProcessor {
                 String snapshotName = snapshotFullPath.substring(snapshotFullPath.lastIndexOf("/") + 1);
                 snap_full_name = disk.getName() + "@" + snapshotName;
                 if (primaryPool.getType() == StoragePoolType.RBD) {
-                    Rados r = new Rados(primaryPool.getAuthUserName());
-                    r.confSet("mon_host", primaryPool.getSourceHost() + ":" + primaryPool.getSourcePort());
-                    r.confSet("key", primaryPool.getAuthSecret());
-                    r.confSet("client_mount_timeout", "30");
-                    r.connect();
-                    s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
+                    Rados r = radosConnect(primaryPool);
                     IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir());
                     Rbd rbd = new Rbd(io);
                     RbdImage image = rbd.open(disk.getName());
@@ -1649,6 +1660,9 @@ public class KVMStorageProcessor implements StorageProcessor {
                     rbd.close(image);
                     r.ioCtxDestroy(io);
                 }
+            } else if (primaryPool.getType() == StoragePoolType.NetworkFilesystem) {
+                s_logger.info(String.format("Attempting to remove snapshot (id=%s, name=%s, path=%s, storage type=%s) on primary storage", snapshotTO.getId(), snapshotTO.getName(), snapshotTO.getPath(), primaryPool.getType()));
+                deleteSnapshotViaManageSnapshotScript(snapshotName, disk);
             } else {
                 s_logger.warn("Operation not implemented for storage pool type of " + primaryPool.getType().toString());
                 throw new InternalErrorException("Operation not implemented for storage pool type of " + primaryPool.getType().toString());


@@ -371,6 +371,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
 
     // Reorder hosts in the decreasing order of free capacity.
     private List<? extends Host> reorderHostsByCapacity(DeploymentPlan plan, List<? extends Host> hosts) {
+        Long zoneId = plan.getDataCenterId();
         Long clusterId = plan.getClusterId();
         //Get capacity by which we should reorder
         String capacityTypeToOrder = _configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());
@@ -378,7 +379,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
         if("RAM".equalsIgnoreCase(capacityTypeToOrder)){
            capacityType = CapacityVO.CAPACITY_TYPE_MEMORY;
         }
-        List<Long> hostIdsByFreeCapacity = _capacityDao.orderHostsByFreeCapacity(clusterId, capacityType);
+        List<Long> hostIdsByFreeCapacity = _capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("List of hosts in descending order of free capacity in the cluster: "+ hostIdsByFreeCapacity);
         }


@@ -3706,7 +3706,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
             affinityGroups.addAll(listDomainLevelAffinityGroups(scDomain, searchFilter, domainId));
         }
 
-        return new Pair<List<AffinityGroupJoinVO>, Integer>(affinityGroups, uniqueGroupsPair.second());
+        return new Pair<List<AffinityGroupJoinVO>, Integer>(affinityGroups, affinityGroups.size());
     }