CLOUDSTACK-9827: Storage tags stored in multiple places

This commit is contained in:
nvazquez 2017-03-08 15:19:55 -03:00 committed by nvazquez
parent ec2d4dd422
commit edf0e2b26f
18 changed files with 870 additions and 354 deletions

View File

@ -267,7 +267,6 @@
<bean id="storagePoolDetailsDaoImpl" class="com.cloud.storage.dao.StoragePoolDetailsDaoImpl" />
<bean id="storagePoolJoinDaoImpl" class="com.cloud.api.query.dao.StoragePoolJoinDaoImpl" />
<bean id="storagePoolTagsDaoImpl" class="com.cloud.storage.dao.StoragePoolTagsDaoImpl" />
<bean id="storageTagDaoImpl" class="com.cloud.api.query.dao.StorageTagDaoImpl" />
<bean id="hostTagDaoImpl" class="com.cloud.api.query.dao.HostTagDaoImpl" />
<bean id="storagePoolWorkDaoImpl" class="com.cloud.storage.dao.StoragePoolWorkDaoImpl" />
<bean id="templatePrimaryDataStoreDaoImpl" class="org.apache.cloudstack.storage.volume.db.TemplatePrimaryDataStoreDaoImpl" />

View File

@ -18,6 +18,8 @@ package com.cloud.storage.dao;
import java.util.List;
import org.apache.cloudstack.api.response.StorageTagResponse;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.utils.db.GenericDao;
@ -26,5 +28,7 @@ public interface StoragePoolTagsDao extends GenericDao<StoragePoolTagVO, Long> {
/** Replaces the tag set of the given pool with the supplied list (one row per tag). */
void persist(long poolId, List<String> storagePoolTags);
/** Returns the tag names currently associated with the given pool. */
List<String> getStoragePoolTags(long poolId);
/** Removes every tag row for the given pool. */
void deleteTags(long poolId);
/** Batch-loads tag rows by primary key; executes in configurable batch sizes. */
List<StoragePoolTagVO> searchByIds(Long... stIds);
/** Converts a tag VO into its API response representation. */
StorageTagResponse newStorageTagResponse(StoragePoolTagVO tag);
}

View File

@ -18,6 +18,12 @@ package com.cloud.storage.dao;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
@ -26,12 +32,21 @@ import com.cloud.utils.db.TransactionLegacy;
public class StoragePoolTagsDaoImpl extends GenericDaoBase<StoragePoolTagVO, Long> implements StoragePoolTagsDao {
@Inject
private ConfigurationDao _configDao;
protected final SearchBuilder<StoragePoolTagVO> StoragePoolSearch;
private final SearchBuilder<StoragePoolTagVO> StoragePoolIdsSearch;
private static final int DEFAULT_BATCH_QUERY_SIZE = 2000;
/**
 * Prepares the two reusable search builders: one keyed by pool id,
 * one keyed by a set of row ids (used for batched lookups).
 */
public StoragePoolTagsDaoImpl() {
    // Lookup of tag rows by their own primary keys, in batches.
    StoragePoolIdsSearch = createSearchBuilder();
    StoragePoolIdsSearch.and("idIN", StoragePoolIdsSearch.entity().getId(), SearchCriteria.Op.IN);
    StoragePoolIdsSearch.done();

    // Lookup of all tag rows belonging to a single storage pool.
    StoragePoolSearch = createSearchBuilder();
    StoragePoolSearch.and("poolId", StoragePoolSearch.entity().getPoolId(), SearchCriteria.Op.EQ);
    StoragePoolSearch.done();
}
@Override
@ -77,4 +92,69 @@ public class StoragePoolTagsDaoImpl extends GenericDaoBase<StoragePoolTagVO, Lon
txn.commit();
}
@Override
public List<StoragePoolTagVO> searchByIds(Long... stIds) {
    // Query in batches of at most detail.batch.query.size ids at a time.
    final int batchSize = getDetailsBatchSize();
    final List<StoragePoolTagVO> result = new ArrayList<StoragePoolTagVO>();
    for (int offset = 0; offset < stIds.length; offset += batchSize) {
        // Last batch may be smaller than batchSize.
        final int size = Math.min(batchSize, stIds.length - offset);
        searchForStoragePoolIdsInternal(offset, size, stIds, result);
    }
    return result;
}
/**
 * Loads one batch of {@link StoragePoolTagVO} rows by id and appends them to {@code pools}.
 * Rows are fetched including removed entries (searchIncludingRemoved).
 * @param currIndex index into {@code stIds} where this batch starts
 * @param batchSize number of ids in this batch
 * @param stIds full array of tag row ids
 * @param pools output list the loaded rows are appended to
 */
protected void searchForStoragePoolIdsInternal(int currIndex, int batchSize, Long[] stIds, List<StoragePoolTagVO> pools) {
    // Copy the batch slice; System.arraycopy replaces the previous manual loop.
    final Long[] ids = new Long[batchSize];
    System.arraycopy(stIds, currIndex, ids, 0, batchSize);

    SearchCriteria<StoragePoolTagVO> sc = StoragePoolIdsSearch.create();
    sc.setParameters("idIN", (Object[])ids);

    // Renamed from "vms" (copy-paste leftover from a VM DAO) for clarity.
    List<StoragePoolTagVO> found = searchIncludingRemoved(sc, null, null, false);
    if (found != null) {
        pools.addAll(found);
    }
}
/**
 * Retrieves the {@code detail.batch.query.size} configuration value.
 * Falls back to {@link StoragePoolTagsDaoImpl#DEFAULT_BATCH_QUERY_SIZE} when the
 * setting is absent or not a valid integer (previously a malformed value would
 * propagate a NumberFormatException out of every batched search).
 * @return configured batch size, or the default
 */
protected int getDetailsBatchSize() {
    final String batchCfg = _configDao.getValue("detail.batch.query.size");
    if (batchCfg == null) {
        return DEFAULT_BATCH_QUERY_SIZE;
    }
    try {
        return Integer.parseInt(batchCfg);
    } catch (NumberFormatException e) {
        // Misconfigured value: degrade gracefully to the default batch size.
        return DEFAULT_BATCH_QUERY_SIZE;
    }
}
/**
 * Builds the API response object for a single storage pool tag row.
 * @param tag persisted tag row
 * @return populated {@link StorageTagResponse} with object name "storagetag"
 */
@Override
public StorageTagResponse newStorageTagResponse(StoragePoolTagVO tag) {
    final StorageTagResponse response = new StorageTagResponse();
    response.setName(tag.getTag());
    response.setPoolId(tag.getPoolId());
    response.setObjectName("storagetag");
    return response;
}
}

View File

@ -53,7 +53,7 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
*/
void updateCapacityIops(long id, long capacityIops);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags);
/**
* Find pool by name.
@ -100,7 +100,7 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
Map<String, String> getDetails(long poolId);
List<String> searchForStoragePoolDetails(long poolId, String value);
List<String> searchForStoragePoolTags(long poolId);
List<StoragePoolVO> findIfDuplicatePoolsExistByUUID(String uuid);
@ -121,4 +121,6 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
List<StoragePoolVO> findLocalStoragePoolsByHostAndTags(long hostId, String[] tags);
List<StoragePoolVO> listLocalStoragePoolByPath(long datacenterId, String path);
void deletePoolTags(long poolId);
}

View File

@ -20,19 +20,22 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.ScopeType;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
@ -58,15 +61,28 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
protected StoragePoolDetailsDao _detailsDao;
@Inject
protected StoragePoolHostDao _hostDao;
@Inject
protected StoragePoolTagsDao _tagsDao;
private final String DetailsSqlPrefix =
protected final String DetailsSqlPrefix =
"SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
private final String ZoneWideDetailsSqlPrefix =
"SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideDetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
protected final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
protected final String ZoneWideTagsSqlPrefix =
"SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
protected final String ZoneWideTagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?";
private final String FindPoolTagDetails = "SELECT storage_pool_details.name FROM storage_pool_details WHERE pool_id = ? and value = ?";
// Storage tags are now separate from storage_pool_details, leaving only details on that table
protected final String TagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
protected final String TagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?";
protected final String FindPoolTags = "SELECT storage_pool_tags.tag FROM storage_pool_tags WHERE pool_id = ?";
/**
 * Used in method findPoolsByDetailsOrTagsInternal
 * Selects which table the generated SQL targets: storage_pool_details (DETAILS)
 * or storage_pool_tags (TAGS).
 */
protected enum ValueType {
    DETAILS, TAGS;
}
public PrimaryDataStoreDaoImpl() {
AllFieldSearch = createSearchBuilder();
@ -256,7 +272,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
@Override
@DB
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details) {
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
pool = super.persist(pool);
@ -266,57 +282,132 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
_detailsDao.persist(vo);
}
}
if (CollectionUtils.isNotEmpty(tags)) {
_tagsDao.persist(pool.getId(), tags);
}
txn.commit();
return pool;
}
/**
 * Internal helper method to retrieve storage pools by given details or storage tags.
 * @param dcId data center id
 * @param podId pod id
 * @param clusterId cluster id
 * @param scope scope
 * @param sqlValues sql string containing details or storage tags values required to query
 * @param valuesType enumerate to indicate if values are related to details or storage tags
 * @param valuesLength values length
 * @return list of storage pools matching conditions
 */
protected List<StoragePoolVO> findPoolsByDetailsOrTagsInternal(long dcId, long podId, Long clusterId, ScopeType scope, String sqlValues, ValueType valuesType, int valuesLength) {
    // Enum constants are singletons: compare with == (also avoids NPE if valuesType is null).
    String sqlPrefix = valuesType == ValueType.DETAILS ? DetailsSqlPrefix : TagsSqlPrefix;
    String sqlSuffix = valuesType == ValueType.DETAILS ? DetailsSqlSuffix : TagsSqlSuffix;
    String sql = getSqlPreparedStatement(sqlPrefix, sqlSuffix, sqlValues, clusterId);
    return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, valuesLength);
}
/**
* Search storage pools in a transaction
* @param sql prepared statement sql
* @param dcId data center id
* @param podId pod id
* @param clusterId cluster id
* @param scope scope
* @param valuesLength values length
* @return storage pools matching criteria
*/
@DB
@Override
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
StringBuilder sql = new StringBuilder(DetailsSqlPrefix);
protected List<StoragePoolVO> searchStoragePoolsPreparedStatement(String sql, long dcId, Long podId, Long clusterId, ScopeType scope, int valuesLength) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
try (PreparedStatement pstmt = txn.prepareStatement(sql);){
if (pstmt != null) {
int i = 1;
pstmt.setLong(i++, dcId);
if (podId != null) {
pstmt.setLong(i++, podId);
}
pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
pstmt.setInt(i++, valuesLength);
try(ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
return pools;
}
/**
 * Assembles the final SQL text from prefix, optional cluster clause, values and suffix.
 * @param sqlPrefix prefix
 * @param sqlSuffix suffix
 * @param sqlValues tags or details values
 * @param clusterId cluster id; when non-null an extra cluster filter clause is inserted
 * @return sql prepared statement text
 */
protected String getSqlPreparedStatement(String sqlPrefix, String sqlSuffix, String sqlValues, Long clusterId) {
    // Cluster-scoped queries also match pools with a NULL cluster_id (zone/pod-wide pools).
    final String clusterClause = (clusterId == null)
            ? ""
            : "storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (";
    return sqlPrefix + clusterClause + sqlValues + sqlSuffix;
}
/**
* Return SQL string from details, to be placed between SQL Prefix and SQL Suffix when creating storage tags PreparedStatement.
* @param details storage pool details
* @return SQL string containing storage tag values to be Prefix and Suffix when creating PreparedStatement.
* @throws NullPointerException if details is null
* @throws IndexOutOfBoundsException if details is not null, but empty
*/
protected String getSqlValuesFromDetails(Map<String, String> details) {
StringBuilder sqlValues = new StringBuilder();
for (Map.Entry<String, String> detail : details.entrySet()) {
sql.append("((storage_pool_details.name='")
sqlValues.append("((storage_pool_details.name='")
.append(detail.getKey())
.append("') AND (storage_pool_details.value='")
.append(detail.getValue())
.append("')) OR ");
}
sql.delete(sql.length() - 4, sql.length());
sql.append(DetailsSqlSuffix);
TransactionLegacy txn = TransactionLegacy.currentTxn();
try (PreparedStatement pstmt = txn.prepareStatement(sql.toString());){
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setLong(i++, podId);
pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
pstmt.setInt(i++, details.size());
try(ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
return pools;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
sqlValues.delete(sqlValues.length() - 4, sqlValues.length());
return sqlValues.toString();
}
protected Map<String, String> tagsToDetails(String[] tags) {
Map<String, String> details = new HashMap<String, String>(tags.length);
/**
* Return SQL string from storage tags, to be placed between SQL Prefix and SQL Suffix when creating storage tags PreparedStatement.
* @param tags storage tags array
* @return SQL string containing storage tag values to be placed between Prefix and Suffix when creating PreparedStatement.
* @throws NullPointerException if tags is null
* @throws IndexOutOfBoundsException if tags is not null, but empty
*/
protected String getSqlValuesFromStorageTags(String[] tags) throws NullPointerException, IndexOutOfBoundsException {
StringBuilder sqlValues = new StringBuilder();
for (String tag : tags) {
details.put(tag, "true");
sqlValues.append("(storage_pool_tags.tag='")
.append(tag)
.append("') OR ");
}
return details;
sqlValues.delete(sqlValues.length() - 4, sqlValues.length());
return sqlValues.toString();
}
/**
 * Finds Up storage pools in the given dc/pod (optionally cluster) matching every
 * supplied detail name/value pair.
 */
@DB
@Override
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
    // Delegate to the shared details/tags search path with DETAILS semantics.
    return findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, scope,
            getSqlValuesFromDetails(details), ValueType.DETAILS, details.size());
}
@Override
@ -325,8 +416,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
} else {
Map<String, String> details = tagsToDetails(tags);
storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.CLUSTER);
String sqlValues = getSqlValuesFromStorageTags(tags);
storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, ScopeType.CLUSTER, sqlValues, ValueType.TAGS, tags.length);
}
return storagePools;
@ -358,8 +449,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId, ScopeType.HOST);
} else {
Map<String, String> details = tagsToDetails(tags);
storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.HOST);
String sqlValues = getSqlValuesFromStorageTags(tags);
storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, ScopeType.HOST, sqlValues, ValueType.TAGS, tags.length);
}
return storagePools;
@ -369,7 +460,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
public List<StoragePoolVO> findLocalStoragePoolsByHostAndTags(long hostId, String[] tags) {
SearchBuilder<StoragePoolVO> hostSearch = createSearchBuilder();
SearchBuilder<StoragePoolHostVO> hostPoolSearch = _hostDao.createSearchBuilder();
SearchBuilder<StoragePoolDetailVO> tagPoolSearch = _detailsDao.createSearchBuilder();;
SearchBuilder<StoragePoolTagVO> tagPoolSearch = _tagsDao.createSearchBuilder();;
// Search for pools on the host
hostPoolSearch.and("hostId", hostPoolSearch.entity().getHostId(), Op.EQ);
@ -380,9 +471,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
hostSearch.join("hostJoin", hostPoolSearch, hostSearch.entity().getId(), hostPoolSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER);
if (!(tags == null || tags.length == 0 )) {
tagPoolSearch.and("name", tagPoolSearch.entity().getName(), Op.EQ);
tagPoolSearch.and("value", tagPoolSearch.entity().getValue(), Op.EQ);
hostSearch.join("tagJoin", tagPoolSearch, hostSearch.entity().getId(), tagPoolSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER);
tagPoolSearch.and("tag", tagPoolSearch.entity().getTag(), Op.EQ);
hostSearch.join("tagJoin", tagPoolSearch, hostSearch.entity().getId(), tagPoolSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER);
}
SearchCriteria<StoragePoolVO> sc = hostSearch.create();
@ -391,10 +481,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.setParameters("status", Status.Up.toString());
if (!(tags == null || tags.length == 0 )) {
Map<String, String> details = tagsToDetails(tags);
for (Map.Entry<String, String> detail : details.entrySet()) {
sc.setJoinParameters("tagJoin","name", detail.getKey());
sc.setJoinParameters("tagJoin", "value", detail.getValue());
for (String tag : tags) {
sc.setJoinParameters("tagJoin", "tag", tag);
}
}
return listBy(sc);
@ -409,68 +497,15 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE);
return sc.list();
} else {
Map<String, String> details = tagsToDetails(tags);
StringBuilder sql = new StringBuilder(ZoneWideDetailsSqlPrefix);
for (int i=0;i<details.size();i++){
sql.append("((storage_pool_details.name=?) AND (storage_pool_details.value=?)) OR ");
}
sql.delete(sql.length() - 4, sql.length());
sql.append(ZoneWideDetailsSqlSuffix);
TransactionLegacy txn = TransactionLegacy.currentTxn();
try (PreparedStatement pstmt = txn.prepareStatement(sql.toString());){
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
if (pstmt != null) {
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setString(i++, ScopeType.ZONE.toString());
for (Map.Entry<String, String> detail : details.entrySet()) {
pstmt.setString(i++, detail.getKey());
pstmt.setString(i++, detail.getValue());
}
pstmt.setInt(i++, details.size());
try(ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
}catch (SQLException e) {
throw new CloudRuntimeException("findZoneWideStoragePoolsByTags:Exception:" + e.getMessage(), e);
}
}
return pools;
} catch (SQLException e) {
throw new CloudRuntimeException("findZoneWideStoragePoolsByTags:Exception:" + e.getMessage(), e);
}
String sqlValues = getSqlValuesFromStorageTags(tags);
String sql = getSqlPreparedStatement(ZoneWideTagsSqlPrefix, ZoneWideTagsSqlSuffix, sqlValues, null);
return searchStoragePoolsPreparedStatement(sql, dcId, null, null, ScopeType.ZONE, tags.length);
}
}
@Override
@DB
public List<String> searchForStoragePoolDetails(long poolId, String value) {
StringBuilder sql = new StringBuilder(FindPoolTagDetails);
TransactionLegacy txn = TransactionLegacy.currentTxn();
List<String> tags = new ArrayList<String>();
try(PreparedStatement pstmt = txn.prepareStatement(sql.toString());) {
if (pstmt != null) {
pstmt.setLong(1, poolId);
pstmt.setString(2, value);
try(ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
tags.add(rs.getString("name"));
}
}catch (SQLException e) {
throw new CloudRuntimeException("searchForStoragePoolDetails:Exception:" + e.getMessage(), e);
}
}
return tags;
} catch (SQLException e) {
throw new CloudRuntimeException("searchForStoragePoolDetails:Exception:" + e.getMessage(), e);
}
public List<String> searchForStoragePoolTags(long poolId) {
return _tagsDao.getStoragePoolTags(poolId);
}
@Override
@ -530,4 +565,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.and(sc.entity().getHypervisor(), Op.EQ, hypervisorType);
return sc.list();
}
/** Deletes all storage tags of the given pool; delegates to the tags DAO. */
@Override
public void deletePoolTags(long poolId) {
    _tagsDao.deleteTags(poolId);
}
}

View File

@ -0,0 +1,128 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.powermock.modules.junit4.PowerMockRunner;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.doNothing;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import junit.framework.TestCase;
// Unit tests for StoragePoolTagsDaoImpl: batch-size configuration lookup and
// the batched searchByIds/searchForStoragePoolIdsInternal logic.
@RunWith(PowerMockRunner.class)
public class StoragePoolTagsDaoImplTest extends TestCase {

    // Mocked configuration source backing getDetailsBatchSize().
    @Mock
    ConfigurationDao _configDao;
    @Mock
    SearchBuilder<StoragePoolTagVO> StoragePoolIdsSearch;
    // Spy so inherited searchIncludingRemoved(...) can be stubbed and verified.
    @Spy
    @InjectMocks
    private StoragePoolTagsDaoImpl _storagePoolTagsDaoImpl = new StoragePoolTagsDaoImpl();
    @Mock
    StoragePoolTagVO storagePoolTag1;
    @Mock
    StoragePoolTagVO storagePoolTag2;

    private final String batchSizeConfigurationKey = "detail.batch.query.size";
    private final String batchSizeValue = "2800";
    private final String batchSizeDefaultValue = "2000";
    // Batch size smaller than storageTagsIds.length to force multiple batches.
    private final String batchSizeLow = "2";
    private final Long[] storageTagsIds = {1l,2l,3l,4l,5l};
    // NOTE(review): this field initializer runs at construction, before @Mock
    // injection, so the list likely captures null elements; the tests below only
    // assert on list size, which still passes — confirm intent.
    private final List<StoragePoolTagVO> storagePoolTagList = Arrays.asList(storagePoolTag1, storagePoolTag2);

    @Before
    public void setup() {
        when(_configDao.getValue(batchSizeConfigurationKey)).thenReturn(batchSizeValue);
        // Default stub: any criteria search returns the two-element list.
        doReturn(storagePoolTagList).when(_storagePoolTagsDaoImpl).searchIncludingRemoved(
            Matchers.any(SearchCriteria.class), Matchers.isNull(Filter.class), Matchers.isNull(Boolean.class), Matchers.eq(false));
    }

    // Configured value present: it is parsed and returned.
    @Test
    public void testGetDetailsBatchSizeNotNull() {
        assertEquals(Integer.parseInt(batchSizeValue), _storagePoolTagsDaoImpl.getDetailsBatchSize());
    }

    // Configured value absent: default batch size is returned.
    @Test
    public void testGetDetailsBatchSizeNull() {
        when(_configDao.getValue(batchSizeConfigurationKey)).thenReturn(null);
        assertEquals(Integer.parseInt(batchSizeDefaultValue), _storagePoolTagsDaoImpl.getDetailsBatchSize());
    }

    // Non-null search result: all returned rows are appended to the output list.
    @Test
    public void testSearchForStoragePoolIdsInternalStorageTagsNotNullSearch() {
        List<StoragePoolTagVO> storagePoolTags = new ArrayList<StoragePoolTagVO>();
        _storagePoolTagsDaoImpl.searchForStoragePoolIdsInternal(0, storageTagsIds.length, storageTagsIds, storagePoolTags);
        verify(_storagePoolTagsDaoImpl).searchIncludingRemoved(Matchers.any(SearchCriteria.class), Matchers.isNull(Filter.class), Matchers.isNull(Boolean.class), Matchers.eq(false));
        assertEquals(2, storagePoolTags.size());
    }

    // Null search result: output list stays empty (no NPE).
    @Test
    public void testSearchForStoragePoolIdsInternalStorageTagsNullSearch() {
        List<StoragePoolTagVO> storagePoolTags = new ArrayList<StoragePoolTagVO>();
        doReturn(null).when(_storagePoolTagsDaoImpl).searchIncludingRemoved(
            Matchers.any(SearchCriteria.class), Matchers.isNull(Filter.class), Matchers.isNull(Boolean.class), Matchers.eq(false));
        _storagePoolTagsDaoImpl.searchForStoragePoolIdsInternal(0, storageTagsIds.length, storageTagsIds, storagePoolTags);
        verify(_storagePoolTagsDaoImpl).searchIncludingRemoved(Matchers.any(SearchCriteria.class), Matchers.isNull(Filter.class), Matchers.isNull(Boolean.class), Matchers.eq(false));
        assertEquals(0, storagePoolTags.size());
    }

    // 5 ids with batch size 2: expects two full batches plus one remainder batch.
    @Test
    public void testSearchByIdsStorageTagsIdsGreaterOrEqualThanBatchSize() {
        when(_configDao.getValue(batchSizeConfigurationKey)).thenReturn(batchSizeLow);
        doNothing().when(_storagePoolTagsDaoImpl).searchForStoragePoolIdsInternal(Matchers.anyInt(), Matchers.anyInt(), Matchers.any(Long[].class), Matchers.anyList());
        _storagePoolTagsDaoImpl.searchByIds(storageTagsIds);
        int batchSize = Integer.parseInt(batchSizeLow);
        int difference = storageTagsIds.length - 2 * batchSize;
        verify(_storagePoolTagsDaoImpl, Mockito.times(2)).searchForStoragePoolIdsInternal(Matchers.anyInt(), Matchers.eq(batchSize), Matchers.any(Long[].class), Matchers.anyList());
        verify(_storagePoolTagsDaoImpl).searchForStoragePoolIdsInternal(Matchers.eq(2 * batchSize), Matchers.eq(difference), Matchers.any(Long[].class), Matchers.anyList());
    }

    // Fewer ids than the (default 2800) batch size: a single batch covers everything.
    @Test
    public void testSearchByIdsStorageTagsIdsLowerThanBatchSize() {
        doNothing().when(_storagePoolTagsDaoImpl).searchForStoragePoolIdsInternal(Matchers.anyInt(), Matchers.anyInt(), Matchers.any(Long[].class), Matchers.anyList());
        _storagePoolTagsDaoImpl.searchByIds(storageTagsIds);
        verify(_storagePoolTagsDaoImpl).searchForStoragePoolIdsInternal(Matchers.eq(0), Matchers.eq(storageTagsIds.length), Matchers.any(Long[].class), Matchers.anyList());
    }
}

View File

@ -0,0 +1,151 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.db;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl.ValueType;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Spy;
import org.powermock.modules.junit4.PowerMockRunner;
import com.cloud.storage.ScopeType;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import junit.framework.TestCase;
@RunWith(PowerMockRunner.class)
public class PrimaryDataStoreDaoImplTest extends TestCase {
@Mock
StoragePoolDetailsDao _detailsDao;
@Mock
StoragePoolHostDao _hostDao;
@Mock
StoragePoolTagsDao _tagsDao;
@Spy
@InjectMocks
private static PrimaryDataStoreDaoImpl primaryDataStoreDao = new PrimaryDataStoreDaoImpl();
@Mock
StoragePoolVO storagePoolVO;
private static final String STORAGE_TAG_1 = "NFS-A";
private static final String STORAGE_TAG_2 = "NFS-B";
private static final String[] STORAGE_TAGS_ARRAY = {STORAGE_TAG_1, STORAGE_TAG_2};
private static final String DETAIL_KEY = "storage.overprovisioning.factor";
private static final String DETAIL_VALUE = "2.0";
private static final Map<String, String> STORAGE_POOL_DETAILS = new HashMap<String, String>(){{ put(DETAIL_KEY, DETAIL_VALUE); }};
private static final String EXPECTED_RESULT_SQL_STORAGE_TAGS = "(storage_pool_tags.tag='" + STORAGE_TAG_1 + "') OR (storage_pool_tags.tag='" + STORAGE_TAG_2 + "')";
private static final String EXPECTED_RESULT_SQL_DETAILS = "((storage_pool_details.name='" + DETAIL_KEY + "') AND (storage_pool_details.value='" + DETAIL_VALUE +"'))";
private static final String SQL_PREFIX = "XXXXXXXXXXXXXXXX";
private static final String SQL_SUFFIX = "ZZZZZZZZZZZZZZZZ";
private static final String SQL_VALUES = "YYYYYYYYYYYYYYYY";
private static final Long DATACENTER_ID = 1l;
private static final Long POD_ID = 1l;
private static final Long CLUSTER_ID = null;
private static final ScopeType SCOPE = ScopeType.ZONE;
@Before
public void setup() {
doReturn(Arrays.asList(storagePoolVO)).when(primaryDataStoreDao).
searchStoragePoolsPreparedStatement(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong(), Matchers.anyLong(),
Matchers.any(ScopeType.class), Matchers.anyInt());
}
@Test
public void testGetSqlValuesFromStorageTagsNotNullStorageTags() {
assertEquals(EXPECTED_RESULT_SQL_STORAGE_TAGS, primaryDataStoreDao.getSqlValuesFromStorageTags(STORAGE_TAGS_ARRAY));
}
@Test(expected=NullPointerException.class)
public void testGetSqlValuesFromStorageTagsNullStorageTags() {
primaryDataStoreDao.getSqlValuesFromStorageTags(null);
}
@Test(expected=IndexOutOfBoundsException.class)
public void testGetSqlValuesFromStorageTagsEmptyStorageTags() {
String[] emptyStorageTags = {};
primaryDataStoreDao.getSqlValuesFromStorageTags(emptyStorageTags);
}
@Test
public void testGetSqlValuesFromDetailsNotNullDetails() {
assertEquals(EXPECTED_RESULT_SQL_DETAILS, primaryDataStoreDao.getSqlValuesFromDetails(STORAGE_POOL_DETAILS));
}
@Test(expected=NullPointerException.class)
public void testGetSqlValuesFromDetailsNullDetails() {
primaryDataStoreDao.getSqlValuesFromDetails(null);
}
@Test(expected=IndexOutOfBoundsException.class)
public void testGetSqlValuesFromDetailsEmptyDetailss() {
Map<String,String> emptyDetails = new HashMap<String, String>();
primaryDataStoreDao.getSqlValuesFromDetails(emptyDetails);
}
@Test
public void testGetSqlPreparedStatementNullClusterId() {
    // Without a cluster id the statement is a plain prefix + values + suffix concatenation.
    assertEquals(SQL_PREFIX + SQL_VALUES + SQL_SUFFIX,
        primaryDataStoreDao.getSqlPreparedStatement(SQL_PREFIX, SQL_SUFFIX, SQL_VALUES, null));
}
@Test
public void testGetSqlPreparedStatementNotNullClusterId() {
    // With a cluster id the DAO injects this cluster filter clause between prefix and values.
    String clusterSql = "storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (";
    // Upper-case 'L' suffix so the long literal cannot be misread as the digit 1 (was "1l").
    String sqlPreparedStatement = primaryDataStoreDao.getSqlPreparedStatement(SQL_PREFIX, SQL_SUFFIX, SQL_VALUES, 1L);
    assertEquals(SQL_PREFIX + clusterSql + SQL_VALUES + SQL_SUFFIX, sqlPreparedStatement);
}
@Test
public void testFindPoolsByDetailsOrTagsInternalStorageTagsType() {
    // TAGS flavour: the DAO must assemble the SQL from the tags prefix/suffix pair
    // and forward it, with the scope arguments, to the prepared-statement search.
    List<StoragePoolVO> storagePools = primaryDataStoreDao.findPoolsByDetailsOrTagsInternal(DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, SQL_VALUES, ValueType.TAGS, STORAGE_TAGS_ARRAY.length);
    // The stubbed search (see setup) always returns the fixture pool.
    assertEquals(Arrays.asList(storagePoolVO), storagePools);
    verify(primaryDataStoreDao).getSqlPreparedStatement(
        primaryDataStoreDao.TagsSqlPrefix, primaryDataStoreDao.TagsSqlSuffix, SQL_VALUES, CLUSTER_ID);
    // CLUSTER_ID is null here, so no cluster clause is inserted in the expected SQL.
    String expectedSql = primaryDataStoreDao.TagsSqlPrefix + SQL_VALUES + primaryDataStoreDao.TagsSqlSuffix;
    verify(primaryDataStoreDao).searchStoragePoolsPreparedStatement(expectedSql, DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, STORAGE_TAGS_ARRAY.length);
}
@Test
public void testFindPoolsByDetailsOrTagsInternalDetailsType() {
    // DETAILS flavour: same contract as the TAGS test above, but using the
    // details prefix/suffix pair and the details-map size as the value count.
    List<StoragePoolVO> storagePools = primaryDataStoreDao.findPoolsByDetailsOrTagsInternal(DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, SQL_VALUES, ValueType.DETAILS, STORAGE_POOL_DETAILS.size());
    // The stubbed search (see setup) always returns the fixture pool.
    assertEquals(Arrays.asList(storagePoolVO), storagePools);
    verify(primaryDataStoreDao).getSqlPreparedStatement(
        primaryDataStoreDao.DetailsSqlPrefix, primaryDataStoreDao.DetailsSqlSuffix, SQL_VALUES, CLUSTER_ID);
    // CLUSTER_ID is null here, so no cluster clause is inserted in the expected SQL.
    String expectedSql = primaryDataStoreDao.DetailsSqlPrefix + SQL_VALUES + primaryDataStoreDao.DetailsSqlSuffix;
    verify(primaryDataStoreDao).searchStoragePoolsPreparedStatement(expectedSql, DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, STORAGE_POOL_DETAILS.size());
}
}

View File

@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.volume.datastore;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -118,6 +119,8 @@ public class PrimaryDataStoreHelper {
dataStoreVO.setPath(updatedPath);
}
String tags = params.getTags();
List<String> storageTags = new ArrayList<String>();
if (tags != null) {
String[] tokens = tags.split(",");
@ -126,10 +129,10 @@ public class PrimaryDataStoreHelper {
if (tag.length() == 0) {
continue;
}
details.put(tag, "true");
storageTags.add(tag);
}
}
dataStoreVO = dataStoreDao.persist(dataStoreVO, details);
dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags);
return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary);
}
@ -231,6 +234,7 @@ public class PrimaryDataStoreHelper {
poolVO.setUuid(null);
this.dataStoreDao.update(poolVO.getId(), poolVO);
dataStoreDao.remove(poolVO.getId());
dataStoreDao.deletePoolTags(poolVO.getId());
deletePoolStats(poolVO.getId());
// Delete op_host_capacity entries
this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, null, null, null, poolVO.getId());

View File

@ -89,7 +89,6 @@ import com.cloud.api.query.dao.ResourceTagJoinDao;
import com.cloud.api.query.dao.SecurityGroupJoinDao;
import com.cloud.api.query.dao.ServiceOfferingJoinDao;
import com.cloud.api.query.dao.StoragePoolJoinDao;
import com.cloud.api.query.dao.StorageTagDao;
import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.dao.UserAccountJoinDao;
import com.cloud.api.query.dao.UserVmJoinDao;
@ -113,7 +112,6 @@ import com.cloud.api.query.vo.ResourceTagJoinVO;
import com.cloud.api.query.vo.SecurityGroupJoinVO;
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.api.query.vo.UserAccountJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
@ -255,6 +253,7 @@ import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.storage.StorageStats;
import com.cloud.storage.UploadVO;
import com.cloud.storage.VMTemplateVO;
@ -266,6 +265,7 @@ import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotPolicyDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.dao.UploadDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateDetailsDao;
@ -400,7 +400,7 @@ public class ApiDBUtils {
static HostJoinDao s_hostJoinDao;
static VolumeJoinDao s_volJoinDao;
static StoragePoolJoinDao s_poolJoinDao;
static StorageTagDao s_tagDao;
static StoragePoolTagsDao s_tagDao;
static HostTagDao s_hostTagDao;
static ImageStoreJoinDao s_imageStoreJoinDao;
static AccountJoinDao s_accountJoinDao;
@ -600,7 +600,7 @@ public class ApiDBUtils {
@Inject
private StoragePoolJoinDao poolJoinDao;
@Inject
private StorageTagDao tagDao;
private StoragePoolTagsDao tagDao;
@Inject
private HostTagDao hosttagDao;
@Inject
@ -1800,7 +1800,7 @@ public class ApiDBUtils {
return s_poolJoinDao.newStoragePoolResponse(vr);
}
public static StorageTagResponse newStorageTagResponse(StorageTagVO vr) {
public static StorageTagResponse newStorageTagResponse(StoragePoolTagVO vr) {
return s_tagDao.newStorageTagResponse(vr);
}

View File

@ -125,7 +125,6 @@ import com.cloud.api.query.dao.ResourceTagJoinDao;
import com.cloud.api.query.dao.SecurityGroupJoinDao;
import com.cloud.api.query.dao.ServiceOfferingJoinDao;
import com.cloud.api.query.dao.StoragePoolJoinDao;
import com.cloud.api.query.dao.StorageTagDao;
import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.dao.UserAccountJoinDao;
import com.cloud.api.query.dao.UserVmJoinDao;
@ -149,7 +148,6 @@ import com.cloud.api.query.vo.ResourceTagJoinVO;
import com.cloud.api.query.vo.SecurityGroupJoinVO;
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.api.query.vo.UserAccountJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
@ -190,8 +188,10 @@ import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.TemplateType;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.tags.ResourceTagVO;
import com.cloud.tags.dao.ResourceTagDao;
@ -306,7 +306,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
private StoragePoolJoinDao _poolJoinDao;
@Inject
private StorageTagDao _storageTagDao;
private StoragePoolTagsDao _storageTagDao;
@Inject
private HostTagDao _hostTagDao;
@ -2268,43 +2268,43 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
@Override
public ListResponse<StorageTagResponse> searchForStorageTags(ListStorageTagsCmd cmd) {
Pair<List<StorageTagVO>, Integer> result = searchForStorageTagsInternal(cmd);
Pair<List<StoragePoolTagVO>, Integer> result = searchForStorageTagsInternal(cmd);
ListResponse<StorageTagResponse> response = new ListResponse<StorageTagResponse>();
List<StorageTagResponse> tagResponses = ViewResponseHelper.createStorageTagResponse(result.first().toArray(new StorageTagVO[result.first().size()]));
List<StorageTagResponse> tagResponses = ViewResponseHelper.createStorageTagResponse(result.first().toArray(new StoragePoolTagVO[result.first().size()]));
response.setResponses(tagResponses, result.second());
return response;
}
private Pair<List<StorageTagVO>, Integer> searchForStorageTagsInternal(ListStorageTagsCmd cmd) {
Filter searchFilter = new Filter(StorageTagVO.class, "id", Boolean.TRUE, null, null);
private Pair<List<StoragePoolTagVO>, Integer> searchForStorageTagsInternal(ListStorageTagsCmd cmd) {
Filter searchFilter = new Filter(StoragePoolTagVO.class, "id", Boolean.TRUE, null, null);
SearchBuilder<StorageTagVO> sb = _storageTagDao.createSearchBuilder();
SearchBuilder<StoragePoolTagVO> sb = _storageTagDao.createSearchBuilder();
sb.select(null, Func.DISTINCT, sb.entity().getId()); // select distinct
SearchCriteria<StorageTagVO> sc = sb.create();
SearchCriteria<StoragePoolTagVO> sc = sb.create();
// search storage tag details by ids
Pair<List<StorageTagVO>, Integer> uniqueTagPair = _storageTagDao.searchAndCount(sc, searchFilter);
Pair<List<StoragePoolTagVO>, Integer> uniqueTagPair = _storageTagDao.searchAndCount(sc, searchFilter);
Integer count = uniqueTagPair.second();
if (count.intValue() == 0) {
return uniqueTagPair;
}
List<StorageTagVO> uniqueTags = uniqueTagPair.first();
List<StoragePoolTagVO> uniqueTags = uniqueTagPair.first();
Long[] vrIds = new Long[uniqueTags.size()];
int i = 0;
for (StorageTagVO v : uniqueTags) {
for (StoragePoolTagVO v : uniqueTags) {
vrIds[i++] = v.getId();
}
List<StorageTagVO> vrs = _storageTagDao.searchByIds(vrIds);
List<StoragePoolTagVO> vrs = _storageTagDao.searchByIds(vrIds);
return new Pair<List<StorageTagVO>, Integer>(vrs, count);
return new Pair<List<StoragePoolTagVO>, Integer>(vrs, count);
}
@Override

View File

@ -73,11 +73,11 @@ import com.cloud.api.query.vo.ResourceTagJoinVO;
import com.cloud.api.query.vo.SecurityGroupJoinVO;
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.api.query.vo.UserAccountJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
import com.cloud.api.query.vo.VolumeJoinVO;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.user.Account;
/**
@ -294,10 +294,10 @@ public class ViewResponseHelper {
return new ArrayList<StoragePoolResponse>(vrDataList.values());
}
public static List<StorageTagResponse> createStorageTagResponse(StorageTagVO... storageTags) {
public static List<StorageTagResponse> createStorageTagResponse(StoragePoolTagVO... storageTags) {
ArrayList<StorageTagResponse> list = new ArrayList<StorageTagResponse>();
for (StorageTagVO vr : storageTags) {
for (StoragePoolTagVO vr : storageTags) {
list.add(ApiDBUtils.newStorageTagResponse(vr));
}

View File

@ -1,30 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.query.dao;
import java.util.List;
import org.apache.cloudstack.api.response.StorageTagResponse;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.utils.db.GenericDao;
public interface StorageTagDao extends GenericDao<StorageTagVO, Long> {
StorageTagResponse newStorageTagResponse(StorageTagVO storageTag);
List<StorageTagVO> searchByIds(Long... storageTagIds);
}

View File

@ -1,124 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.query.dao;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@Component
public class StorageTagDaoImpl extends GenericDaoBase<StorageTagVO, Long> implements StorageTagDao {
public static final Logger s_logger = Logger.getLogger(StorageTagDaoImpl.class);
@Inject
private ConfigurationDao _configDao;
private final SearchBuilder<StorageTagVO> stSearch;
private final SearchBuilder<StorageTagVO> stIdSearch;
protected StorageTagDaoImpl() {
stSearch = createSearchBuilder();
stSearch.and("idIN", stSearch.entity().getId(), SearchCriteria.Op.IN);
stSearch.done();
stIdSearch = createSearchBuilder();
stIdSearch.and("id", stIdSearch.entity().getId(), SearchCriteria.Op.EQ);
stIdSearch.done();
_count = "select count(distinct id) from storage_tag_view WHERE ";
}
@Override
public StorageTagResponse newStorageTagResponse(StorageTagVO tag) {
StorageTagResponse tagResponse = new StorageTagResponse();
tagResponse.setName(tag.getName());
tagResponse.setPoolId(tag.getPoolId());
tagResponse.setObjectName("storagetag");
return tagResponse;
}
@Override
public List<StorageTagVO> searchByIds(Long... stIds) {
String batchCfg = _configDao.getValue("detail.batch.query.size");
final int detailsBatchSize = batchCfg != null ? Integer.parseInt(batchCfg) : 2000;
// query details by batches
List<StorageTagVO> uvList = new ArrayList<StorageTagVO>();
int curr_index = 0;
if (stIds.length > detailsBatchSize) {
while ((curr_index + detailsBatchSize) <= stIds.length) {
Long[] ids = new Long[detailsBatchSize];
for (int k = 0, j = curr_index; j < curr_index + detailsBatchSize; j++, k++) {
ids[k] = stIds[j];
}
SearchCriteria<StorageTagVO> sc = stSearch.create();
sc.setParameters("idIN", (Object[])ids);
List<StorageTagVO> vms = searchIncludingRemoved(sc, null, null, false);
if (vms != null) {
uvList.addAll(vms);
}
curr_index += detailsBatchSize;
}
}
if (curr_index < stIds.length) {
int batch_size = (stIds.length - curr_index);
// set the ids value
Long[] ids = new Long[batch_size];
for (int k = 0, j = curr_index; j < curr_index + batch_size; j++, k++) {
ids[k] = stIds[j];
}
SearchCriteria<StorageTagVO> sc = stSearch.create();
sc.setParameters("idIN", (Object[])ids);
List<StorageTagVO> vms = searchIncludingRemoved(sc, null, null, false);
if (vms != null) {
uvList.addAll(vms);
}
}
return uvList;
}
}

View File

@ -1,61 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.query.vo;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
/**
* Storage Tags DB view.
*
*/
@Entity
@Table(name = "storage_tag_view")
public class StorageTagVO extends BaseViewVO implements InternalIdentity {
private static final long serialVersionUID = 1L;
@Id
@Column(name = "id")
private long id;
@Column(name = "name")
private String name;
@Column(name = "pool_id")
long poolId;
@Override
public long getId() {
return id;
}
public String getName() {
return name;
}
public long getPoolId() {
return poolId;
}
public void setPoolId(long poolId) {
this.poolId = poolId;
}
}

View File

@ -524,7 +524,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public String getStoragePoolTags(long poolId) {
return StringUtils.listToCsvTags(_storagePoolDao.searchForStoragePoolDetails(poolId, "true"));
return StringUtils.listToCsvTags(_storagePoolDao.searchForStoragePoolTags(poolId));
}
@Override

View File

@ -48,7 +48,8 @@
<bean id="vGPUTypesDaoImpl" class="com.cloud.gpu.dao.VGPUTypesDaoImpl" />
<bean id="usageEventDaoImpl" class="com.cloud.event.dao.UsageEventDaoImpl" />
<bean id="usageEventDetailsDaoImpl" class="com.cloud.event.dao.UsageEventDetailsDaoImpl" />
<bean id="storagePoolHostDaoImpl" class="com.cloud.storage.dao.StoragePoolHostDaoImpl" />
<bean id="storagePoolHostDaoImpl" class="com.cloud.storage.dao.StoragePoolHostDaoImpl" />
<bean id="storagePoolTagsDaoImpl" class="com.cloud.storage.dao.StoragePoolTagsDaoImpl" />
<bean id="primaryDataStoreDaoImpl" class="org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl" />
<bean id="userIpAddressDetailsDao" class="org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDaoImpl" />

View File

@ -229,3 +229,6 @@ JOIN `cloud`.`service_offering` o ON (v.service_offering_id = o.id)
JOIN `cloud`.`vm_snapshots` s ON (s.service_offering_id = o.id AND s.vm_id = v.id)
WHERE (o.cpu is null AND o.speed IS NULL AND o.ram_size IS NULL) AND
(d.name = 'cpuNumber' OR d.name = 'cpuSpeed' OR d.name = 'memory');
-- CLOUDSTACK-9827: Storage tags stored in multiple places
DROP VIEW IF EXISTS `cloud`.`storage_tag_view`;

View File

@ -24,6 +24,8 @@ from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
import logging
from marvin.lib.decoratorGenerators import skipTestIf
#Import System modules
import time
@ -246,3 +248,319 @@ class TestPrimaryStorageServices(cloudstackTestCase):
self.cleanup = []
return
class StorageTagsServices:
    """Test Storage Tags Data Class.
    """
    def __init__(self):
        # Two distinct tag names: "a" is given to the primary storage pool and the
        # offerings that should match it; "b" to offerings that must fail to find storage.
        self.storage_tags = {
            "a" : "NFS-A",
            "b" : "NFS-B"
        }
class TestStorageTags(cloudstackTestCase):
@classmethod
def setUpClass(cls):
    # Console logger so progress is visible when the suite runs standalone.
    cls.logger = logging.getLogger('TestStorageTags')
    cls.stream_handler = logging.StreamHandler()
    cls.logger.setLevel(logging.DEBUG)
    cls.logger.addHandler(cls.stream_handler)
    test_case = super(TestStorageTags, cls)
    testClient = test_case.getClsTestClient()
    cls.config = test_case.getClsConfig()
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.pod = get_pod(cls.apiclient, cls.zone.id)
    cls.hypervisor = testClient.getHypervisorInfo()
    cls.domain = get_domain(cls.apiclient)
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.services["ostype"]
    )
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = cls.template.id
    cls.services["storage_tags"] = StorageTagsServices().storage_tags
    # Tests are skipped on Hyper-V via the @skipTestIf decorator on each test.
    cls.hypervisorNotSupported = False
    if cls.hypervisor.lower() in ["hyperv"]:
        cls.hypervisorNotSupported = True
    cls._cleanup = []
    if not cls.hypervisorNotSupported:
        cls.clusters = list_clusters(
            cls.apiclient,
            zoneid=cls.zone.id
        )
        assert isinstance(cls.clusters, list) and len(cls.clusters) > 0
        # Create PS with Storage Tag
        cls.storage_pool_1 = StoragePool.create(cls.apiclient,
                                                cls.services["nfs"],
                                                clusterid=cls.clusters[0].id,
                                                zoneid=cls.zone.id,
                                                podid=cls.pod.id,
                                                tags=cls.services["storage_tags"]["a"]
                                                )
        cls._cleanup.append(cls.storage_pool_1)
        assert cls.storage_pool_1.state == 'Up'
        # Sanity-check the pool is visible through the list API.
        storage_pools_response = list_storage_pools(cls.apiclient,
                                                    id=cls.storage_pool_1.id)
        assert isinstance(storage_pools_response, list) and len(storage_pools_response) > 0
        storage_response = storage_pools_response[0]
        assert storage_response.id == cls.storage_pool_1.id and storage_response.type == cls.storage_pool_1.type
        # Create Service Offerings with different Storage Tags
        cls.service_offering_1 = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"],
            tags=cls.services["storage_tags"]["a"]
        )
        cls._cleanup.append(cls.service_offering_1)
        cls.service_offering_2 = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"],
            tags=cls.services["storage_tags"]["b"]
        )
        cls._cleanup.append(cls.service_offering_2)
        # Create Disk Offerings with different Storage Tags
        cls.disk_offering_1 = DiskOffering.create(
            cls.apiclient,
            cls.services["disk_offering"],
            tags=cls.services["storage_tags"]["a"]
        )
        cls._cleanup.append(cls.disk_offering_1)
        cls.disk_offering_2 = DiskOffering.create(
            cls.apiclient,
            cls.services["disk_offering"],
            tags=cls.services["storage_tags"]["b"]
        )
        cls._cleanup.append(cls.disk_offering_2)
        # Create Account
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls._cleanup.append(cls.account)
        # Create VM-1 with using Service Offering 1
        cls.virtual_machine_1 = VirtualMachine.create(
            cls.apiclient,
            cls.services["virtual_machine"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering_1.id,
            hypervisor=cls.hypervisor,
            mode=cls.zone.networktype
        )
        # VM-1 not appended to _cleanup, it is expunged on tearDownClass before cleaning up resources
    return
@classmethod
def tearDownClass(cls):
    try:
        # First expunge vm, so PS can be cleaned up
        # (the pool cannot be removed while a VM still has volumes on it).
        cls.virtual_machine_1.delete(cls.apiclient, expunge=True)
        cleanup_resources(cls.apiclient, cls._cleanup)
    except Exception as e:
        raise Exception("Cleanup failed with %s" % e)
def setUp(self):
    # Fresh DB connection and a per-test cleanup list for each test method.
    self.dbclient = self.testClient.getDbConnection()
    self.cleanup = []
    return
def tearDown(self):
    try:
        # Remove resources created by the individual test.
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        # Surface cleanup failures loudly: leaked resources break later tests.
        raise Exception("Warning: Exception during cleanup : %s" % e)
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@skipTestIf("hypervisorNotSupported")
def test_01_deploy_vms_storage_tags(self):
    """Test Deploy VMS using different Service Offerings with Storage Tags
    """
    # Save cleanup size before trying to deploy VM-2
    cleanup_size = len(self.cleanup)
    # Try deploying VM-2 using CO-2 -> Should fail to find storage and fail deployment
    try:
        self.virtual_machine_2 = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            templateid=self.template.id,
            serviceofferingid=self.service_offering_2.id,
            hypervisor=self.hypervisor
        )
        self.cleanup.append(self.virtual_machine_2)
    except Exception as e:
        self.debug("Expected exception %s: " % e)
    # Typo fixed in the debug message below ("Asssert" -> "Assert").
    self.debug("Assert that vm2 was not deployed, so it couldn't be appended to cleanup")
    self.assertEquals(cleanup_size, len(self.cleanup))
    # Create V-1 using DO-1
    self.volume_1 = Volume.create(
        self.apiclient,
        self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering_1.id
    )
    self.cleanup.append(self.volume_1)
    # Create V-2 using DO-2
    self.volume_2 = Volume.create(
        self.apiclient,
        self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering_2.id
    )
    self.cleanup.append(self.volume_2)
    # Try attaching V-2 to VM-1 -> Should fail finding storage and fail attachment
    try:
        self.virtual_machine_1.attach_volume(
            self.apiclient,
            self.volume_2
        )
    except Exception as e:
        self.debug("Expected exception %s: " % e)
    vm_1_volumes = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine_1.id,
        type='DATADISK',
        listall=True
    )
    self.debug("VM-1 Volumes: %s" % vm_1_volumes)
    self.assertEquals(None, vm_1_volumes, "Check that volume V-2 has not been attached to VM-1")
    # Attach V_1 to VM_1
    self.virtual_machine_1.attach_volume(self.apiclient, self.volume_1)
    vm_1_volumes = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine_1.id,
        type='DATADISK',
        listall=True
    )
    self.debug("VM-1 Volumes: %s" % vm_1_volumes)
    self.assertEquals(vm_1_volumes[0].id, self.volume_1.id, "Check that volume V-1 has been attached to VM-1")
    # Detach so tearDown can delete the volume cleanly.
    self.virtual_machine_1.detach_volume(self.apiclient, self.volume_1)
    return
def check_storage_pool_tag(self, poolid, tag):
    # Assert that the given pool has exactly one storage tag and that it matches
    # the expected tag name. `poolid` is the internal DB id exposed by the API.
    cmd = listStorageTags.listStorageTagsCmd()
    storage_tags_response = self.apiclient.listStorageTags(cmd)
    # Wrap filter() in list(): on Python 3 filter() returns a lazy iterator with
    # no len()/indexing; on Python 2 this is a no-op copy, so behavior is identical.
    pool_tags = list(filter(lambda x: x.poolid == poolid, storage_tags_response))
    self.assertEquals(1, len(pool_tags), "Check storage tags size")
    self.assertEquals(tag, pool_tags[0].name, "Check storage tag on storage pool")
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@skipTestIf("hypervisorNotSupported")
def test_02_edit_primary_storage_tags(self):
    """ Test Edit Storage Tags
    """
    # Resolve the pool's internal DB id from its API uuid; check_storage_pool_tag
    # filters the listStorageTags response by that internal pool id.
    qresultset = self.dbclient.execute(
        "select id from storage_pool where uuid = '%s';"
        % str(self.storage_pool_1.id)
    )
    self.assertEquals(1, len(qresultset), "Check DB Query result set")
    qresult = qresultset[0]
    storage_pool_db_id = qresult[0]
    self.check_storage_pool_tag(storage_pool_db_id, self.services["storage_tags"]["a"])
    # Update Storage Tag
    StoragePool.update(
        self.apiclient,
        id=self.storage_pool_1.id,
        tags=self.services["storage_tags"]["b"]
    )
    self.check_storage_pool_tag(storage_pool_db_id, self.services["storage_tags"]["b"])
    # Revert Storage Tag
    StoragePool.update(
        self.apiclient,
        id=self.storage_pool_1.id,
        tags=self.services["storage_tags"]["a"]
    )
    self.check_storage_pool_tag(storage_pool_db_id, self.services["storage_tags"]["a"])
    return
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@skipTestIf("hypervisorNotSupported")
def test_03_migration_options_storage_tags(self):
    """ Test Volume migration options for Storage Pools with different Storage Tags
    """
    # Create PS-2 using Storage Tag
    storage_pool_2 = StoragePool.create(self.apiclient,
                                        self.services["nfs2"],
                                        clusterid=self.clusters[0].id,
                                        zoneid=self.zone.id,
                                        podid=self.pod.id,
                                        tags=self.services["storage_tags"]["a"]
                                        )
    self.cleanup.append(storage_pool_2)
    assert storage_pool_2.state == 'Up'
    storage_pools_response = list_storage_pools(self.apiclient,
                                                id=storage_pool_2.id)
    assert isinstance(storage_pools_response, list) and len(storage_pools_response) > 0
    storage_response = storage_pools_response[0]
    assert storage_response.id == storage_pool_2.id and storage_response.type == storage_pool_2.type
    vm_1_volumes = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine_1.id,
        type='ROOT',
        listall=True
    )
    vol = vm_1_volumes[0]
    # Check migration options for volume
    pools_response = StoragePool.listForMigration(
        self.apiclient,
        id=vol.id
    )
    # list(filter(...)) keeps this working on both Python 2 and Python 3, where
    # filter() returns a lazy iterator that has no len() or indexing.
    pools_suitable = list(filter(lambda p: p.suitableformigration, pools_response))
    self.assertEquals(1, len(pools_suitable), "Check that there is only one item on the list")
    self.assertEquals(pools_suitable[0].id, storage_pool_2.id, "Check that PS-2 is the migration option for volume")
    # Update PS-2 Storage Tags
    StoragePool.update(
        self.apiclient,
        id=storage_pool_2.id,
        tags=self.services["storage_tags"]["b"]
    )
    # Check migration options for volume after updating PS-2 Storage Tags
    pools_response = StoragePool.listForMigration(
        self.apiclient,
        id=vol.id
    )
    pools_suitable = list(filter(lambda p: p.suitableformigration, pools_response))
    self.assertEquals(0, len(pools_suitable), "Check that there is no migration option for volume")
    return