StoragePoolType as class (#8544)

* StoragePoolType as a class

* Fix agent side StoragePoolType enum to class

* Handle StoragePoolType for StoragePoolJoinVO

* Since StoragePoolType is a class, it cannot be converted by the @Enumerated annotation.
Implemented a converter class and logic to utilize the @Convert annotation.

* Fix UserVMJoinVO for StoragePoolType

* Fixed missing imports

* Fixed equals for the enum.

* Removed unneeded try/catch for prepareAttribute

* Added license to the file.

* Implemented "supportsPhysicalDiskCopy" for storage adaptor.

Co-authored-by: mprokopchuk <mprokopchuk@apple.com>

* Add javadoc to StoragePoolType class

* Add unit test for StoragePoolType comparisons

* StoragePoolType "==" and ".equals()" fix.

* Fix StoragePoolType for FiberChannelAdapter

* Fix for abstract storage adaptor set up issue

* Addressed review comments

* Pass StoragePoolType object for poolType dao attribute

---------

Co-authored-by: Marcus Sorensen <mls@apple.com>
Co-authored-by: mprokopchuk <mprokopchuk@apple.com>
Co-authored-by: mprokopchuk <mprokopchuk@gmail.com>
Suresh Kumar Anaparti 2024-02-05 13:27:15 +05:30 committed by GitHub
parent d353fcc599
commit 8ea9fc911d
34 changed files with 440 additions and 122 deletions


@ -16,10 +16,14 @@
// under the License.
package com.cloud.storage;
import org.apache.commons.lang.NotImplementedException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.lang3.StringUtils;
public class Storage {
public static enum ImageFormat {
@ -135,37 +139,72 @@ public class Storage {
ISODISK /* Template corresponding to a iso (non root disk) present in an OVA */
}
public static enum StoragePoolType {
Filesystem(false, true, true), // local directory
NetworkFilesystem(true, true, true), // NFS
IscsiLUN(true, false, false), // shared LUN, with a clusterfs overlay
Iscsi(true, false, false), // for e.g., ZFS Comstar
ISO(false, false, false), // for iso image
LVM(false, false, false), // XenServer local LVM SR
CLVM(true, false, false),
RBD(true, true, false), // http://libvirt.org/storage.html#StorageBackendRBD
SharedMountPoint(true, false, true),
VMFS(true, true, false), // VMware VMFS storage
PreSetup(true, true, false), // for XenServer, Storage Pool is set up by customers.
EXT(false, true, false), // XenServer local EXT SR
OCFS2(true, false, false),
SMB(true, false, false),
Gluster(true, false, false),
PowerFlex(true, true, true), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS)
ManagedNFS(true, false, false),
Linstor(true, true, false),
DatastoreCluster(true, true, false), // for VMware, to abstract pool of clusters
StorPool(true, true, true),
FiberChannel(true, true, false); // Fiber Channel Pool for KVM hypervisors is used to find the volume by WWN value (/dev/disk/by-id/wwn-<wwnvalue>)
/**
* StoragePoolTypes carry some details about the format and capabilities of a storage pool. While not necessarily a
* 1:1 with PrimaryDataStoreDriver (and for KVM agent, KVMStoragePool and StorageAdaptor) implementations, it is
* often used to decide which storage plugin or storage command to call, so it may be necessary for new storage
* plugins to add a StoragePoolType. This can be done by adding it below, or by creating a new public static final
* instance of StoragePoolType in the plugin itself, which registers it with the map.
*
* Note that if the StoragePoolType is for KVM and defined in plugin code rather than below, care must be taken to
* ensure this is available on the agent side as well. This is best done by defining the StoragePoolType in a common
* package available on both management server and agent plugin jars.
*/
public static class StoragePoolType {
private static final Map<String, StoragePoolType> map = new LinkedHashMap<>();
public static final StoragePoolType Filesystem = new StoragePoolType("Filesystem", false, true, true);
public static final StoragePoolType NetworkFilesystem = new StoragePoolType("NetworkFilesystem", true, true, true);
public static final StoragePoolType IscsiLUN = new StoragePoolType("IscsiLUN", true, false, false);
public static final StoragePoolType Iscsi = new StoragePoolType("Iscsi", true, false, false);
public static final StoragePoolType ISO = new StoragePoolType("ISO", false, false, false);
public static final StoragePoolType LVM = new StoragePoolType("LVM", false, false, false);
public static final StoragePoolType CLVM = new StoragePoolType("CLVM", true, false, false);
public static final StoragePoolType RBD = new StoragePoolType("RBD", true, true, false);
public static final StoragePoolType SharedMountPoint = new StoragePoolType("SharedMountPoint", true, false, true);
public static final StoragePoolType VMFS = new StoragePoolType("VMFS", true, true, false);
public static final StoragePoolType PreSetup = new StoragePoolType("PreSetup", true, true, false);
public static final StoragePoolType EXT = new StoragePoolType("EXT", false, true, false);
public static final StoragePoolType OCFS2 = new StoragePoolType("OCFS2", true, false, false);
public static final StoragePoolType SMB = new StoragePoolType("SMB", true, false, false);
public static final StoragePoolType Gluster = new StoragePoolType("Gluster", true, false, false);
public static final StoragePoolType PowerFlex = new StoragePoolType("PowerFlex", true, true, true);
public static final StoragePoolType ManagedNFS = new StoragePoolType("ManagedNFS", true, false, false);
public static final StoragePoolType Linstor = new StoragePoolType("Linstor", true, true, false);
public static final StoragePoolType DatastoreCluster = new StoragePoolType("DatastoreCluster", true, true, false);
public static final StoragePoolType StorPool = new StoragePoolType("StorPool", true,true,true);
public static final StoragePoolType FiberChannel = new StoragePoolType("FiberChannel", true,true,false);
private final String name;
private final boolean shared;
private final boolean overprovisioning;
private final boolean encryption;
StoragePoolType(boolean shared, boolean overprovisioning, boolean encryption) {
/**
* Creates a StoragePoolType with only a name, used for name-based comparisons in the DAO layer (Note: does not register it into the map of pool types).
* @param name name of the StoragePoolType.
*/
public StoragePoolType(String name) {
this.name = name;
this.shared = false;
this.overprovisioning = false;
this.encryption = false;
}
/**
* Define a new StoragePoolType, and register it into the map of pool types known to the management server.
* @param name Simple unique name of the StoragePoolType.
* @param shared Storage pool is shared/accessible to multiple hypervisors
* @param overprovisioning Storage pool supports overprovisioning
* @param encryption Storage pool supports encrypted volumes
*/
public StoragePoolType(String name, boolean shared, boolean overprovisioning, boolean encryption) {
this.name = name;
this.shared = shared;
this.overprovisioning = overprovisioning;
this.encryption = encryption;
addStoragePoolType(this);
}
public boolean isShared() {
@ -177,6 +216,48 @@ public class Storage {
}
public boolean supportsEncryption() { return encryption; }
private static void addStoragePoolType(StoragePoolType storagePoolType) {
map.putIfAbsent(storagePoolType.name, storagePoolType);
}
public static StoragePoolType[] values() {
return map.values().toArray(StoragePoolType[]::new).clone();
}
public static StoragePoolType valueOf(String name) {
if (StringUtils.isBlank(name)) {
return null;
}
StoragePoolType storage = map.get(name);
if (storage == null) {
throw new IllegalArgumentException("StoragePoolType '" + name + "' not found");
}
return storage;
}
@Override
public String toString() {
return name;
}
public String name() {
return name;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
StoragePoolType that = (StoragePoolType) o;
return Objects.equals(name, that.name);
}
@Override
public int hashCode() {
return Objects.hash(name);
}
}
public static List<StoragePoolType> getNonSharedStoragePoolTypes() {
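
As the javadoc above describes, a storage plugin can define and register its own StoragePoolType instead of adding one to the list in Storage.java. A minimal sketch, assuming a hypothetical plugin type named "AcmeSAN" (not part of this commit); the constructor that takes the capability flags calls addStoragePoolType(), so valueOf() and values() see the new type once the class is loaded:

    import com.cloud.storage.Storage.StoragePoolType;

    public class AcmeSanStoragePoolType {
        // Hypothetical plugin-defined pool type: shared, supports overprovisioning, no encryption.
        // The constructor with capability flags calls addStoragePoolType(), registering it in the map.
        public static final StoragePoolType ACME_SAN = new StoragePoolType("AcmeSAN", true, true, false);
    }

    // Once the class above has been loaded (on both management server and agent):
    // StoragePoolType.valueOf("AcmeSAN") == AcmeSanStoragePoolType.ACME_SAN  -> true

As the javadoc also warns, for KVM such a constant must live in a package that is available on the agent classpath as well, or the agent cannot resolve the name.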


@ -74,4 +74,13 @@ public class StorageTest {
Assert.assertTrue(StoragePoolType.DatastoreCluster.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.Linstor.supportsOverProvisioning());
}
@Test
public void equalityTest() {
StoragePoolType t1 = StoragePoolType.NetworkFilesystem;
StoragePoolType t2 = StoragePoolType.NetworkFilesystem;
Assert.assertTrue(t1 == StoragePoolType.NetworkFilesystem);
Assert.assertTrue(t1.equals(StoragePoolType.NetworkFilesystem));
Assert.assertFalse(t1.equals(StoragePoolType.EXT));
}
}


@ -0,0 +1,53 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.transport;
import com.cloud.storage.Storage.StoragePoolType;
import com.google.gson.JsonDeserializationContext;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonElement;
import com.google.gson.JsonNull;
import com.google.gson.JsonParseException;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonSerializationContext;
import com.google.gson.JsonSerializer;
import java.lang.reflect.Type;
/**
* {@link StoragePoolType} acts as an extendable set of singleton objects and should give the same result whether compared
* with "==" or {@link Object#equals(Object)}.
* To support that, deserialization needs to return the existing object for a given name instead of creating a new one.
*/
public class StoragePoolTypeAdaptor implements JsonDeserializer<StoragePoolType>, JsonSerializer<StoragePoolType> {
@Override
public StoragePoolType deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) throws JsonParseException {
if (json instanceof JsonPrimitive && ((JsonPrimitive) json).isString()) {
return StoragePoolType.valueOf(json.getAsString());
}
return null;
}
@Override
public JsonElement serialize(StoragePoolType src, Type typeOfSrc, JsonSerializationContext context) {
String name = src.name();
if (name == null) {
return new JsonNull();
}
return new JsonPrimitive(name);
}
}
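
A hedged usage sketch (illustrative, not part of the diff) of what the adaptor provides once it is registered with Gson the way GsonHelper does below: pool types travel as their bare name and come back as the same singleton, so "==" checks keep working across the agent connection. The example class and main method are assumptions for illustration only:

    import com.cloud.agent.transport.StoragePoolTypeAdaptor;
    import com.cloud.storage.Storage.StoragePoolType;
    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;

    public class StoragePoolTypeGsonExample {
        public static void main(String[] args) {
            Gson gson = new GsonBuilder()
                    .registerTypeAdapter(StoragePoolType.class, new StoragePoolTypeAdaptor())
                    .create();

            String json = gson.toJson(StoragePoolType.RBD);                      // "RBD" as a JSON string
            StoragePoolType parsed = gson.fromJson(json, StoragePoolType.class);
            System.out.println(parsed == StoragePoolType.RBD);                   // true, same singleton
        }
    }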


@ -37,6 +37,8 @@ import com.cloud.agent.transport.InterfaceTypeAdaptor;
import com.cloud.agent.transport.LoggingExclusionStrategy;
import com.cloud.agent.transport.Request.NwGroupsCommandTypeAdaptor;
import com.cloud.agent.transport.Request.PortConfigListTypeAdaptor;
import com.cloud.agent.transport.StoragePoolTypeAdaptor;
import com.cloud.storage.Storage;
import com.cloud.utils.Pair;
public class GsonHelper {
@ -69,6 +71,7 @@ public class GsonHelper {
}.getType(), new PortConfigListTypeAdaptor());
builder.registerTypeAdapter(new TypeToken<Pair<Long, Long>>() {
}.getType(), new NwGroupsCommandTypeAdaptor());
builder.registerTypeAdapter(Storage.StoragePoolType.class, new StoragePoolTypeAdaptor());
Gson gson = builder.create();
dsAdaptor.initGson(gson);
dtAdaptor.initGson(gson);


@ -255,7 +255,7 @@ public class RequestTest extends TestCase {
public void testGoodCommand() {
s_logger.info("Testing good Command");
String content = "[{\"com.cloud.agent.api.GetVolumeStatsCommand\":{\"volumeUuids\":[\"dcc860ac-4a20-498f-9cb3-bab4d57aa676\"],"
+ "\"poolType\":\"NetworkFilesystem\",\"poolUuid\":\"e007c270-2b1b-3ce9-ae92-a98b94eef7eb\",\"contextMap\":{},\"wait\":5}}]";
+ "\"poolType\":{\"name\":\"NetworkFilesystem\"},\"poolUuid\":\"e007c270-2b1b-3ce9-ae92-a98b94eef7eb\",\"contextMap\":{},\"wait\":5}}]";
Request sreq = new Request(Version.v2, 1L, 2L, 3L, 1L, (short)1, content);
sreq.setSequence(1);
Command cmds[] = sreq.getCommands();
@ -266,7 +266,7 @@ public class RequestTest extends TestCase {
public void testBadCommand() {
s_logger.info("Testing Bad Command");
String content = "[{\"com.cloud.agent.api.SomeJunkCommand\":{\"volumeUuids\":[\"dcc860ac-4a20-498f-9cb3-bab4d57aa676\"],"
+ "\"poolType\":\"NetworkFilesystem\",\"poolUuid\":\"e007c270-2b1b-3ce9-ae92-a98b94eef7eb\",\"contextMap\":{},\"wait\":5}}]";
+ "\"poolType\":{\"name\":\"NetworkFilesystem\"},\"poolUuid\":\"e007c270-2b1b-3ce9-ae92-a98b94eef7eb\",\"contextMap\":{},\"wait\":5}}]";
Request sreq = new Request(Version.v2, 1L, 2L, 3L, 1L, (short)1, content);
sreq.setSequence(1);
Command cmds[] = sreq.getCommands();


@ -22,6 +22,7 @@ import java.util.Map;
import java.util.UUID;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.DiscriminatorColumn;
import javax.persistence.DiscriminatorType;
import javax.persistence.Entity;
@ -45,6 +46,7 @@ import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceState;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.util.StoragePoolTypeConverter;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.db.StateMachine;
@ -126,6 +128,7 @@ public class EngineHostVO implements EngineHost, Identity {
private String resource;
@Column(name = "fs_type")
@Convert(converter = StoragePoolTypeConverter.class)
private StoragePoolType fsType;
@Column(name = "available")


@ -23,6 +23,7 @@ import java.util.Map;
import java.util.UUID;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.DiscriminatorColumn;
import javax.persistence.DiscriminatorType;
import javax.persistence.Entity;
@ -44,6 +45,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.ServiceOffering;
import com.cloud.resource.ResourceState;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.util.StoragePoolTypeConverter;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GenericDao;
import java.util.Arrays;
@ -130,6 +132,7 @@ public class HostVO implements Host {
private String resource;
@Column(name = "fs_type")
@Convert(converter = StoragePoolTypeConverter.class)
private StoragePoolType fsType;
@Column(name = "available")


@ -20,6 +20,7 @@ import java.util.Date;
import java.util.UUID;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
@ -32,6 +33,7 @@ import javax.persistence.Temporal;
import javax.persistence.TemporalType;
import javax.persistence.Transient;
import com.cloud.util.StoragePoolTypeConverter;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import com.cloud.storage.Storage.ProvisioningType;
@ -114,7 +116,7 @@ public class VolumeVO implements Volume {
Type volumeType = Volume.Type.UNKNOWN;
@Column(name = "pool_type")
@Enumerated(EnumType.STRING)
@Convert(converter = StoragePoolTypeConverter.class)
StoragePoolType poolType;
@Column(name = GenericDao.REMOVED_COLUMN)
@ -331,9 +333,7 @@ public class VolumeVO implements Volume {
this.poolType = poolType;
}
public StoragePoolType getPoolType() {
return poolType;
}
public StoragePoolType getPoolType() { return poolType; }
@Override
public long getDomainId() {


@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.util;
import com.cloud.storage.Storage.StoragePoolType;
import javax.persistence.AttributeConverter;
import javax.persistence.Converter;
/**
* Converts {@link StoragePoolType} to and from {@link String} using {@link StoragePoolType#name()}.
*
* @author mprokopchuk
*/
@Converter
public class StoragePoolTypeConverter implements AttributeConverter<StoragePoolType, String> {
@Override
public String convertToDatabaseColumn(StoragePoolType attribute) {
return attribute != null ? attribute.name() : null;
}
@Override
public StoragePoolType convertToEntityAttribute(String dbData) {
return dbData != null ? StoragePoolType.valueOf(dbData) : null;
}
}
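
A small round-trip sketch for the converter above (illustrative, not part of the commit). Because valueOf() hands back the registered singleton, the value read from the database column is identical to the constant, not merely equal:

    import com.cloud.storage.Storage.StoragePoolType;
    import com.cloud.util.StoragePoolTypeConverter;

    public class StoragePoolTypeConverterExample {
        public static void main(String[] args) {
            StoragePoolTypeConverter converter = new StoragePoolTypeConverter();

            String column = converter.convertToDatabaseColumn(StoragePoolType.PowerFlex); // "PowerFlex"
            StoragePoolType restored = converter.convertToEntityAttribute(column);

            System.out.println(restored == StoragePoolType.PowerFlex);    // true, same singleton
            System.out.println(converter.convertToEntityAttribute(null)); // null-safe, prints null
        }
    }

Entity fields opt in by replacing @Enumerated(EnumType.STRING) with @Convert(converter = StoragePoolTypeConverter.class), as the VO changes in this commit show.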


@ -21,6 +21,7 @@ import java.util.Map;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.utils.db.GenericDao;
@ -130,7 +131,7 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
Integer countAll();
List<StoragePoolVO> findPoolsByStorageType(String storageType);
List<StoragePoolVO> findPoolsByStorageType(Storage.StoragePoolType storageType);
List<StoragePoolVO> listStoragePoolsWithActiveVolumesByOfferingId(long offeringid);
}


@ -31,6 +31,7 @@ import org.apache.commons.collections.CollectionUtils;
import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.StoragePoolTagVO;
@ -619,7 +620,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
}
@Override
public List<StoragePoolVO> findPoolsByStorageType(String storageType) {
public List<StoragePoolVO> findPoolsByStorageType(Storage.StoragePoolType storageType) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("poolType", storageType);
return listBy(sc);


@ -21,11 +21,13 @@ import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.util.StoragePoolTypeConverter;
import com.cloud.utils.UriUtils;
import com.cloud.utils.db.Encrypt;
import com.cloud.utils.db.GenericDao;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
@ -57,7 +59,7 @@ public class StoragePoolVO implements StoragePool {
private String uuid = null;
@Column(name = "pool_type", updatable = false, nullable = false, length = 32)
@Enumerated(value = EnumType.STRING)
@Convert(converter = StoragePoolTypeConverter.class)
private StoragePoolType poolType;
@Column(name = GenericDao.CREATED_COLUMN)
@ -180,8 +182,8 @@ public class StoragePoolVO implements StoragePool {
return poolType;
}
public void setPoolType(StoragePoolType protocol) {
poolType = protocol;
public void setPoolType(StoragePoolType poolType) {
this.poolType = poolType;
}
@Override
@ -273,7 +275,7 @@ public class StoragePoolVO implements StoragePool {
@Override
public String getPath() {
String updatedPath = path;
if (poolType == StoragePoolType.SMB) {
if (poolType.equals(StoragePoolType.SMB)) {
updatedPath = UriUtils.getUpdateUri(updatedPath, false);
if (updatedPath.contains("password") && updatedPath.contains("?")) {
updatedPath = updatedPath.substring(0, updatedPath.indexOf('?'));


@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.volume;
import com.cloud.storage.Storage;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
@ -185,7 +186,9 @@ public class VolumeServiceTest extends TestCase{
public void validateDestroySourceVolumeAfterMigrationExpungeSourceVolumeAfterMigrationThrowExceptionReturnFalse() throws
ExecutionException, InterruptedException{
VolumeObject volumeObject = new VolumeObject();
volumeObject.configure(null, new VolumeVO() {});
VolumeVO vo = new VolumeVO() {};
vo.setPoolType(Storage.StoragePoolType.Filesystem);
volumeObject.configure(null, vo);
List<Exception> exceptions = new ArrayList<>(Arrays.asList(new InterruptedException(), new ExecutionException() {}));


@ -42,13 +42,16 @@ import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import javax.naming.ConfigurationException;
import javax.persistence.AttributeConverter;
import javax.persistence.AttributeOverride;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.EmbeddedId;
import javax.persistence.EntityExistsException;
import javax.persistence.EnumType;
@ -123,6 +126,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
protected final static TimeZone s_gmtTimeZone = TimeZone.getTimeZone("GMT");
protected final static Map<Class<?>, GenericDao<?, ? extends Serializable>> s_daoMaps = new ConcurrentHashMap<Class<?>, GenericDao<?, ? extends Serializable>>(71);
private final ConversionSupport _conversionSupport;
protected Class<T> _entityBeanType;
protected String _table;
@ -287,6 +291,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}
_conversionSupport = new ConversionSupport();
setRunLevel(ComponentLifecycle.RUN_LEVEL_SYSTEM);
}
@ -649,6 +654,9 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
} else if (type == byte[].class) {
field.set(entity, rs.getBytes(index));
} else if (field.getDeclaredAnnotation(Convert.class) != null) {
Object val = _conversionSupport.convertToEntityAttribute(field, rs.getObject(index));
field.set(entity, val);
} else {
field.set(entity, rs.getObject(index));
}
@ -1386,7 +1394,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
String stackTrace = ExceptionUtils.getStackTrace(new CloudRuntimeException(String.format("The query to count all the records of [%s] resulted in a value smaller than"
+ " the result set's size [count of records: %s, result set's size: %s]. Using the result set's size instead.", _entityBeanType,
+ " the result set's size [count of records: %s, result set's size: %s]. Using the result set's size instead.", _entityBeanType,
count, resultSetSize)));
s_logger.warn(stackTrace);
@ -1595,7 +1603,10 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return;
}
}
if (attr.field.getType() == String.class) {
if(attr.field.getDeclaredAnnotation(Convert.class) != null) {
Object val = _conversionSupport.convertToDatabaseColumn(attr.field, value);
pstmt.setObject(j, val);
} else if (attr.field.getType() == String.class) {
final String str = (String)value;
if (str == null) {
pstmt.setString(j, null);
@ -2221,4 +2232,50 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
return sql;
}
/**
* Supports conversion between DB and entity values.
* Detects whether a field is annotated with {@link Convert} and uses the converter instance from the annotation.
*/
static class ConversionSupport {
/**
* Contains cache of {@link AttributeConverter} instances.
*/
private static final Map<Class<?>, AttributeConverter<?, ?>> s_converterCacheMap = new ConcurrentHashMap<>();
/**
* Checks whether the field is annotated with {@link Convert} and tries to convert the source value with its converter.
*
* @param field Entity field
* @param value DB value
* @return converted value if field is annotated with {@link Convert} or original value otherwise
*/
private <T> T convertToEntityAttribute(Field field, Object value) {
return (T) getConverter(field).map(converter -> converter.convertToEntityAttribute(value)).orElse(value);
}
/**
* Checks whether the field is annotated with {@link Convert} and tries to convert the source value with its converter.
*
* @param field Entity field
* @param value Entity value
* @return converted value if field is annotated with {@link Convert} or original value otherwise
*/
private <T> T convertToDatabaseColumn(Field field, Object value) {
return (T) getConverter(field).map(converter -> converter.convertToDatabaseColumn(value)).orElse(value);
}
private Optional<AttributeConverter<Object, Object>> getConverter(Field field) {
return Optional.of(field).map(f -> f.getAnnotation(Convert.class)).map(Convert::converter).filter(AttributeConverter.class::isAssignableFrom).map(converterType -> {
return (AttributeConverter<Object, Object>) s_converterCacheMap.computeIfAbsent(converterType, ct -> {
try {
return (AttributeConverter<?, ?>) ct.getDeclaredConstructor().newInstance();
} catch (ReflectiveOperationException e) {
throw new CloudRuntimeException("Unable to create converter for the class " + converterType, e);
}
});
});
}
}
}


@ -19,12 +19,16 @@ package com.cloud.hypervisor.kvm.storage;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel)
public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
public FiberChannelAdapter() {
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer");
}
@Override
public Storage.StoragePoolType getStoragePoolType() {
return Storage.StoragePoolType.FiberChannel;
}
@Override
public KVMStoragePool getStoragePool(String uuid) {
KVMStoragePool pool = MapStorageUuidToStoragePool.get(uuid);


@ -36,7 +36,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
@StorageAdaptorInfo(storagePoolType=StoragePoolType.Iscsi)
public class IscsiAdmStorageAdaptor implements StorageAdaptor {
private static final Logger s_logger = Logger.getLogger(IscsiAdmStorageAdaptor.class);
@ -51,6 +50,11 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
return storagePool;
}
@Override
public StoragePoolType getStoragePoolType() {
return StoragePoolType.Iscsi;
}
@Override
public KVMStoragePool getStoragePool(String uuid) {
return MapStorageUuidToStoragePool.get(uuid);


@ -16,6 +16,8 @@
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
@ -98,27 +100,45 @@ public class KVMStoragePoolManager {
public KVMStoragePoolManager(StorageLayer storagelayer, KVMHAMonitor monitor) {
this._haMonitor = monitor;
this._storageMapper.put("libvirt", new LibvirtStorageAdaptor(storagelayer));
// add other storage adaptors here
// this._storageMapper.put("newadaptor", new NewStorageAdaptor(storagelayer));
this._storageMapper.put(StoragePoolType.ManagedNFS.toString(), new ManagedNfsStorageAdaptor(storagelayer));
this._storageMapper.put(StoragePoolType.PowerFlex.toString(), new ScaleIOStorageAdaptor(storagelayer));
// add other storage adaptors manually here
// add any adaptors that wish to register themselves via annotation
// add any adaptors that wish to register themselves via call to adaptor.getStoragePoolType()
Reflections reflections = new Reflections("com.cloud.hypervisor.kvm.storage");
Set<Class<? extends StorageAdaptor>> storageAdaptors = reflections.getSubTypesOf(StorageAdaptor.class);
for (Class<? extends StorageAdaptor> storageAdaptor : storageAdaptors) {
StorageAdaptorInfo info = storageAdaptor.getAnnotation(StorageAdaptorInfo.class);
if (info != null && info.storagePoolType() != null) {
if (this._storageMapper.containsKey(info.storagePoolType().toString())) {
s_logger.warn(String.format("Duplicate StorageAdaptor type %s, not loading %s", info.storagePoolType().toString(), storageAdaptor.getName()));
Set<Class<? extends StorageAdaptor>> storageAdaptorClasses = reflections.getSubTypesOf(StorageAdaptor.class);
for (Class<? extends StorageAdaptor> storageAdaptorClass : storageAdaptorClasses) {
s_logger.debug("Checking pool type for adaptor " + storageAdaptorClass.getName());
if (Modifier.isAbstract(storageAdaptorClass.getModifiers()) || storageAdaptorClass.isInterface()) {
s_logger.debug("Skipping registration of abstract class / interface " + storageAdaptorClass.getName());
continue;
}
if (storageAdaptorClass.isAssignableFrom(LibvirtStorageAdaptor.class)) {
s_logger.debug("Skipping re-registration of LibvirtStorageAdaptor");
continue;
}
try {
Constructor<?> storageLayerConstructor = Arrays.stream(storageAdaptorClass.getConstructors())
.filter(c -> c.getParameterCount() == 1)
.filter(c -> c.getParameterTypes()[0].isAssignableFrom(StorageLayer.class))
.findFirst().orElse(null);
StorageAdaptor adaptor;
if (storageLayerConstructor == null) {
adaptor = storageAdaptorClass.getDeclaredConstructor().newInstance();
} else {
try {
s_logger.info(String.format("adding storage adaptor for %s", storageAdaptor.getName()));
this._storageMapper.put(info.storagePoolType().toString(), storageAdaptor.getDeclaredConstructor().newInstance());
} catch (Exception ex) {
throw new CloudRuntimeException(ex.toString());
adaptor = (StorageAdaptor) storageLayerConstructor.newInstance(storagelayer);
}
StoragePoolType storagePoolType = adaptor.getStoragePoolType();
if (storagePoolType != null) {
if (this._storageMapper.containsKey(storagePoolType.toString())) {
s_logger.warn(String.format("Duplicate StorageAdaptor type %s, not loading %s", storagePoolType, storageAdaptorClass.getName()));
} else {
s_logger.info(String.format("Adding storage adaptor for %s", storageAdaptorClass.getName()));
this._storageMapper.put(storagePoolType.toString(), adaptor);
}
}
} catch (Exception ex) {
throw new CloudRuntimeException("Failed to set up storage adaptors", ex);
}
}
@ -127,6 +147,13 @@ public class KVMStoragePoolManager {
}
}
/**
* Returns true if physical disk copy functionality is supported.
*/
public boolean supportsPhysicalDiskCopy(StoragePoolType type) {
return getStorageAdaptor(type).supportsPhysicalDiskCopy(type);
}
public boolean connectPhysicalDisk(StoragePoolType type, String poolUuid, String volPath, Map<String, String> details) {
StorageAdaptor adaptor = getStorageAdaptor(type);
KVMStoragePool pool = adaptor.getStoragePool(poolUuid);
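
With the @StorageAdaptorInfo annotation gone, the manager above registers every concrete StorageAdaptor it finds in com.cloud.hypervisor.kvm.storage under the pool type returned by getStoragePoolType(). A hedged sketch of the shape such an adaptor takes, using the hypothetical "AcmeSAN" pool type; it is declared abstract here only to keep the fragment short and compilable, while a real adaptor must be concrete (the manager skips abstract classes and interfaces) and implement the whole interface:

    import com.cloud.storage.Storage;
    import com.cloud.storage.StorageLayer;

    public abstract class AcmeSanStorageAdaptor implements StorageAdaptor {
        // Hypothetical pool type; registering it here assumes the same constant also exists
        // on the management server side, as the Storage.StoragePoolType javadoc advises.
        private static final Storage.StoragePoolType ACME_SAN =
                new Storage.StoragePoolType("AcmeSAN", true, true, false);

        private final StorageLayer storageLayer;

        // KVMStoragePoolManager prefers a constructor taking a StorageLayer when one exists,
        // and otherwise falls back to the no-arg constructor.
        public AcmeSanStorageAdaptor(StorageLayer storageLayer) {
            this.storageLayer = storageLayer;
        }

        @Override
        public Storage.StoragePoolType getStoragePoolType() {
            // A non-null return value is what gets this adaptor added to the manager's map.
            return ACME_SAN;
        }
    }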


@ -403,7 +403,7 @@ public class KVMStorageProcessor implements StorageProcessor {
if (primaryPool.getType() == StoragePoolType.CLVM) {
templatePath = ((NfsTO)imageStore).getUrl() + File.separator + templatePath;
vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds());
} if (primaryPool.getType() == StoragePoolType.PowerFlex) {
} if (storagePoolMgr.supportsPhysicalDiskCopy(primaryPool.getType())) {
Map<String, String> details = primaryStore.getDetails();
String path = details != null ? details.get("managedStoreTarget") : null;


@ -459,6 +459,12 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
return parser.parseStorageVolumeXML(volDefXML);
}
@Override
public StoragePoolType getStoragePoolType() {
// This is mapped manually in KVMStoragePoolManager
return null;
}
@Override
public KVMStoragePool getStoragePool(String uuid) {
return this.getStoragePool(uuid, false);
@ -775,23 +781,22 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
s_logger.info("Attempting to create volume " + name + " (" + pool.getType().toString() + ") in pool "
+ pool.getUuid() + " with size " + toHumanReadableSize(size));
switch (pool.getType()) {
case RBD:
return createPhysicalDiskByLibVirt(name, pool, PhysicalDiskFormat.RAW, provisioningType, size);
case NetworkFilesystem:
case Filesystem:
switch (format) {
case QCOW2:
case RAW:
return createPhysicalDiskByQemuImg(name, pool, format, provisioningType, size, passphrase);
case DIR:
case TAR:
return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size);
default:
throw new CloudRuntimeException("Unexpected disk format is specified.");
}
default:
return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size);
StoragePoolType poolType = pool.getType();
if (poolType.equals(StoragePoolType.RBD)) {
return createPhysicalDiskByLibVirt(name, pool, PhysicalDiskFormat.RAW, provisioningType, size);
} else if (poolType.equals(StoragePoolType.NetworkFilesystem) || poolType.equals(StoragePoolType.Filesystem)) {
switch (format) {
case QCOW2:
case RAW:
return createPhysicalDiskByQemuImg(name, pool, format, provisioningType, size, passphrase);
case DIR:
case TAR:
return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size);
default:
throw new CloudRuntimeException("Unexpected disk format is specified.");
}
} else {
return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size);
}
}


@ -65,6 +65,9 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor {
return storagePool;
}
@Override
public StoragePoolType getStoragePoolType() { return StoragePoolType.ManagedNFS; }
@Override
public KVMStoragePool getStoragePool(String uuid) {
return getStoragePool(uuid, false);


@ -41,22 +41,19 @@ import org.apache.log4j.Logger;
import org.libvirt.LibvirtException;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageLayer;
import com.cloud.storage.StorageManager;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
import org.apache.commons.lang3.StringUtils;
@StorageAdaptorInfo(storagePoolType= Storage.StoragePoolType.PowerFlex)
public class ScaleIOStorageAdaptor implements StorageAdaptor {
private static final Logger LOGGER = Logger.getLogger(ScaleIOStorageAdaptor.class);
private static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>();
private static final int DEFAULT_DISK_WAIT_TIME_IN_SECS = 60;
private StorageLayer storageLayer;
public ScaleIOStorageAdaptor(StorageLayer storagelayer) {
storageLayer = storagelayer;
public ScaleIOStorageAdaptor() {
}
@Override
@ -70,6 +67,11 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
return pool;
}
@Override
public Storage.StoragePoolType getStoragePoolType() {
return Storage.StoragePoolType.PowerFlex;
}
@Override
public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
return getStoragePool(uuid);


@ -26,6 +26,8 @@ import com.cloud.storage.Storage.StoragePoolType;
public interface StorageAdaptor {
StoragePoolType getStoragePoolType();
public KVMStoragePool getStoragePool(String uuid);
// Get the storage pool from libvirt, but control if libvirt should refresh the pool (can take a long time)
@ -91,4 +93,11 @@ public interface StorageAdaptor {
* @param timeout
*/
KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout);
/**
* Returns true if the storage adaptor supports physical disk copy functionality.
*/
default boolean supportsPhysicalDiskCopy(StoragePoolType type) {
return StoragePoolType.PowerFlex == type;
}
}
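
The default above only reports copy support for PowerFlex. An adaptor that can copy physical disks directly can override it for its own pool type, which is what KVMStorageProcessor now consults instead of hard-coding PowerFlex. A hypothetical fragment, declared abstract purely so the sketch stays self-contained:

    import com.cloud.storage.Storage.StoragePoolType;

    public abstract class CopyCapableStorageAdaptor implements StorageAdaptor {
        @Override
        public boolean supportsPhysicalDiskCopy(StoragePoolType type) {
            // Advertise direct physical-disk copy for this adaptor's own pool type,
            // rather than relying on the PowerFlex-only default.
            return getStoragePoolType() != null && getStoragePoolType().equals(type);
        }
    }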


@ -22,10 +22,7 @@ import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import com.cloud.storage.Storage.StoragePoolType;
@Retention(RetentionPolicy.RUNTIME)
@Target({ TYPE })
public @interface StorageAdaptorInfo {
StoragePoolType storagePoolType();
}


@ -37,14 +37,12 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockedConstruction;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageLayer;
import com.cloud.utils.script.Script;
@RunWith(MockitoJUnitRunner.class)
@ -54,9 +52,6 @@ public class ScaleIOStoragePoolTest {
StorageAdaptor adapter;
@Mock
StorageLayer storageLayer;
@Before
public void setUp() throws Exception {
final String uuid = "345fc603-2d7e-47d2-b719-a0110b3732e6";
@ -65,7 +60,7 @@ public class ScaleIOStoragePoolTest {
Map<String,String> details = new HashMap<String, String>();
details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);
adapter = spy(new ScaleIOStorageAdaptor(storageLayer));
adapter = spy(new ScaleIOStorageAdaptor());
pool = new ScaleIOStoragePool(uuid, "192.168.1.19", 443, "a519be2f00000000", type, details, adapter);
}


@ -17,14 +17,14 @@
package com.cloud.simulator;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.util.StoragePoolTypeConverter;
import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.storage.Storage.StoragePoolType;
@ -50,7 +50,7 @@ public class MockStoragePoolVO implements InternalIdentity {
private String hostGuid;
@Column(name = "pool_type")
@Enumerated(value = EnumType.STRING)
@Convert(converter = StoragePoolTypeConverter.class)
private StoragePoolType poolType;
public MockStoragePoolVO() {


@ -52,7 +52,7 @@ public class MockStoragePoolDaoImpl extends GenericDaoBase<MockStoragePoolVO, Lo
public MockStoragePoolVO findByHost(String hostUuid) {
SearchCriteria<MockStoragePoolVO> sc = hostguidSearch.create();
sc.setParameters("hostguid", hostUuid);
sc.setParameters("type", StoragePoolType.Filesystem.toString());
sc.setParameters("type", StoragePoolType.Filesystem);
return findOneBy(sc);
}


@ -263,7 +263,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore
parameters.setPort(port);
parameters.setPath(hostPath);
} else {
StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme);
StoragePoolType type = StoragePoolType.valueOf(scheme);
if (type != null) {
parameters.setType(type);


@ -54,7 +54,6 @@ import com.linbit.linstor.api.model.ResourceWithVolumes;
import com.linbit.linstor.api.model.StoragePool;
import com.linbit.linstor.api.model.VolumeDefinition;
@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.Linstor)
public class LinstorStorageAdaptor implements StorageAdaptor {
private static final Logger s_logger = Logger.getLogger(LinstorStorageAdaptor.class);
private static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>();
@ -66,6 +65,11 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
return new DevelopersApi(client);
}
@Override
public Storage.StoragePoolType getStoragePoolType() {
return Storage.StoragePoolType.Linstor;
}
private static String getLinstorRscName(String name) {
return LinstorUtil.RSC_PREFIX + name;
}


@ -39,7 +39,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
@StorageAdaptorInfo(storagePoolType=StoragePoolType.StorPool)
public class StorPoolStorageAdaptor implements StorageAdaptor {
public static void SP_LOG(String fmt, Object... args) {
try (PrintWriter spLogFile = new PrintWriter(new BufferedWriter(new FileWriter("/var/log/cloudstack/agent/storpool-agent.log", true)))) {
@ -65,6 +64,11 @@ public class StorPoolStorageAdaptor implements StorageAdaptor {
return storagePool;
}
@Override
public StoragePoolType getStoragePoolType() {
return StoragePoolType.StorPool;
}
@Override
public KVMStoragePool getStoragePool(String uuid) {
SP_LOG("StorPoolStorageAdaptor.getStoragePool: uuid=%s", uuid);


@ -16,6 +16,27 @@
// under the License.
package com.cloud.api.query.dao;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.api.ApiDBUtils;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.capacity.CapacityManager;
@ -33,25 +54,6 @@ import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import javax.inject.Inject;
import java.util.ArrayList;
import java.util.List;
@Component
public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Long> implements StoragePoolJoinDao {
@ -337,7 +339,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
if (keyword != null) {
SearchCriteria<StoragePoolJoinVO> ssc = createSearchCriteria();
ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%");
ssc.addOr("poolType", SearchCriteria.Op.LIKE, "%" + keyword + "%");
ssc.addOr("poolType", SearchCriteria.Op.LIKE, new Storage.StoragePoolType("%" + keyword + "%"));
sc.addAnd("name", SearchCriteria.Op.SC, ssc);
}


@ -19,12 +19,14 @@ package com.cloud.api.query.vo;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.util.StoragePoolTypeConverter;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
@ -64,7 +66,7 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
private StoragePoolStatus status;
@Column(name = "pool_type")
@Enumerated(value = EnumType.STRING)
@Convert(converter = StoragePoolTypeConverter.class)
private StoragePoolType poolType;
@Column(name = GenericDao.CREATED_COLUMN)


@ -22,6 +22,7 @@ import java.util.Map;
import javax.persistence.AttributeOverride;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
@ -38,6 +39,7 @@ import com.cloud.storage.Storage.TemplateType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume;
import com.cloud.user.Account;
import com.cloud.util.StoragePoolTypeConverter;
import com.cloud.utils.db.GenericDao;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.State;
@ -256,7 +258,7 @@ public class UserVmJoinVO extends BaseViewWithTagInformationVO implements Contro
private String poolUuid;
@Column(name = "pool_type", updatable = false, nullable = false, length = 32)
@Enumerated(value = EnumType.STRING)
@Convert(converter = StoragePoolTypeConverter.class)
private StoragePoolType poolType;
@Column(name = "volume_id")


@ -2133,7 +2133,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
StoragePoolInfo childStoragePoolInfo = childDataStoreAnswer.getPoolInfo();
StoragePoolVO dataStoreVO = getExistingPoolByUuid(childStoragePoolInfo.getUuid());
if (dataStoreVO == null && childDataStoreAnswer.getPoolType().equalsIgnoreCase("NFS")) {
List<StoragePoolVO> nfsStoragePools = _storagePoolDao.findPoolsByStorageType(StoragePoolType.NetworkFilesystem.toString());
List<StoragePoolVO> nfsStoragePools = _storagePoolDao.findPoolsByStorageType(StoragePoolType.NetworkFilesystem);
for (StoragePoolVO storagePool : nfsStoragePools) {
String storagePoolUUID = storagePool.getUuid();
if (childStoragePoolInfo.getName().equalsIgnoreCase(storagePoolUUID.replaceAll("-", ""))) {
@ -2193,7 +2193,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
StoragePoolInfo childStoragePoolInfo = childDataStoreAnswer.getPoolInfo();
StoragePoolVO dataStoreVO = _storagePoolDao.findPoolByUUID(childStoragePoolInfo.getUuid());
if (dataStoreVO == null && childDataStoreAnswer.getPoolType().equalsIgnoreCase("NFS")) {
List<StoragePoolVO> nfsStoragePools = _storagePoolDao.findPoolsByStorageType(StoragePoolType.NetworkFilesystem.toString());
List<StoragePoolVO> nfsStoragePools = _storagePoolDao.findPoolsByStorageType(StoragePoolType.NetworkFilesystem);
for (StoragePoolVO storagePool : nfsStoragePools) {
String storagePoolUUID = storagePool.getUuid();
if (childStoragePoolInfo.getName().equalsIgnoreCase(storagePoolUUID.replaceAll("-", ""))) {
@ -2474,7 +2474,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@DB
public StoragePoolVO findLocalStorageOnHost(long hostId) {
SearchCriteria<StoragePoolVO> sc = LocalStorageSearch.create();
sc.setParameters("type", new Object[] {StoragePoolType.Filesystem, StoragePoolType.LVM});
sc.setParameters("type", StoragePoolType.Filesystem, StoragePoolType.LVM);
sc.setJoinParameters("poolHost", "hostId", hostId);
List<StoragePoolVO> storagePools = _storagePoolDao.search(sc, null);
if (!storagePools.isEmpty()) {


@ -21,6 +21,7 @@ import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManagerImpl;
import com.cloud.storage.StoragePoolStatus;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@ -53,6 +54,7 @@ public class StoragePoolMonitorTest {
pool.setScope(ScopeType.CLUSTER);
pool.setStatus(StoragePoolStatus.Up);
pool.setId(123L);
pool.setPoolType(Storage.StoragePoolType.Filesystem);
cmd = new StartupRoutingCommand();
cmd.setHypervisorType(Hypervisor.HypervisorType.KVM);
}