storage: Datera storage plugin (#3470)

Features:

- Zone-wide and cluster-wide primary storage support
- Automatic VM template caching on Datera; subsequent VMs can be created near-instantaneously by fast-cloning the root volume
- Rapid storage-native snapshots
- Multiple managed primary storages can be created on a single Datera cluster for better management of:
  - total provisioned capacity
  - default storage QoS values
  - replica size (1 to 5)
  - IP pool assignment for the iSCSI target
  - volume placement (hybrid, single_flash, all_flash)
- Volume snapshot to VM template
- Volume to VM template
- Volume size increase using a service policy
- Volume QoS change using a service policy
- KVM support
- New Datera app_instance name format that includes the ACS volume name
- VM live migration
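
These settings are supplied through the createStoragePool URL, which DateraUtil parses in the lifecycle code below. A hypothetical URL is sketched here; the key names (MVIP, SVIP, clusterAdminUsername, clusterAdminPassword, numReplicas, volPlacement, ipPool) are assumptions inferred from the DateraUtil constants referenced in the code, not documented syntax:

MVIP=192.168.22.100;SVIP=192.168.128.200:3260;clusterAdminUsername=admin;clusterAdminPassword=secret;numReplicas=3;volPlacement=hybrid;ipPool=default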
Authored by manojkverma on 2019-07-25 01:43:04 -07:00; committed by Rohit Yadav
parent e894658f8c
commit e3d70b7dcc
12 changed files with 5420 additions and 0 deletions

View File: client/pom.xml

@@ -83,6 +83,11 @@
<artifactId>cloud-plugin-storage-volume-cloudbyte</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-datera</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-server</artifactId>

View File: plugins/pom.xml

@@ -112,6 +112,7 @@
<module>storage/image/sample</module>
<module>storage/image/swift</module>
<module>storage/volume/cloudbyte</module>
<module>storage/volume/datera</module>
<module>storage/volume/default</module>
<module>storage/volume/nexenta</module>
<module>storage/volume/sample</module>

View File: plugins/storage/volume/datera/pom.xml

@@ -0,0 +1,66 @@
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
you under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-volume-datera</artifactId>
<name>Apache CloudStack Plugin - Storage Volume Datera Provider</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.13.0.0-SNAPSHOT</version>
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-default</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-volume</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
</dependency>
<dependency>
<groupId>org.aspectj</groupId>
<artifactId>aspectjtools</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.http-client</groupId>
<artifactId>google-http-client</artifactId>
<version>1.17.0-rc</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
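<!-- skipTests disables the default Surefire run for this module; a test
     execution is also declared for the integration-test phase below. -->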
<configuration>
<skipTests>true</skipTests>
</configuration>
<executions>
<execution>
<phase>integration-test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File: DateraPrimaryDataStoreLifeCycle.java

@@ -0,0 +1,420 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.capacity.CapacityManager;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.DateraUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.log4j.Logger;
import javax.inject.Inject;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class DateraPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger.getLogger(DateraPrimaryDataStoreLifeCycle.class);
@Inject
private CapacityManager _capacityMgr;
@Inject
private DataCenterDao zoneDao;
@Inject
private ClusterDao _clusterDao;
@Inject
private ClusterDetailsDao _clusterDetailsDao;
@Inject
private PrimaryDataStoreDao storagePoolDao;
@Inject
private HostDao _hostDao;
@Inject
private PrimaryDataStoreHelper dataStoreHelper;
@Inject
private ResourceManager _resourceMgr;
@Inject
private SnapshotDao _snapshotDao;
@Inject
private SnapshotDetailsDao _snapshotDetailsDao;
@Inject
private StorageManager _storageMgr;
@Inject
private StoragePoolHostDao _storagePoolHostDao;
@Inject
private StoragePoolAutomation storagePoolAutomation;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
String url = (String) dsInfos.get("url");
Long zoneId = (Long) dsInfos.get("zoneId");
Long podId = (Long) dsInfos.get("podId");
Long clusterId = (Long) dsInfos.get("clusterId");
String storagePoolName = (String) dsInfos.get("name");
String providerName = (String) dsInfos.get("providerName");
Long capacityBytes = (Long) dsInfos.get("capacityBytes");
Long capacityIops = (Long) dsInfos.get("capacityIops");
String tags = (String) dsInfos.get("tags");
@SuppressWarnings("unchecked")
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
String domainName = details.get("domainname");
String storageVip = DateraUtil.getStorageVip(url);
int storagePort = DateraUtil.getStoragePort(url);
int numReplicas = DateraUtil.getNumReplicas(url);
String volPlacement = DateraUtil.getVolPlacement(url);
String clusterAdminUsername = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_USERNAME, url);
String clusterAdminPassword = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_PASSWORD, url);
String uuid;
String randomString;
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
// checks if primary datastore is clusterwide. If so, uses the clusterId to set
// the uuid and then sets the podId and clusterId parameters
if (clusterId != null) {
if (podId == null) {
throw new CloudRuntimeException("The Pod ID must be specified.");
}
if (zoneId == null) {
throw new CloudRuntimeException("The Zone ID must be specified.");
}
ClusterVO cluster = _clusterDao.findById(clusterId);
String clusterUuid = cluster.getUuid();
randomString = DateraUtil.generateUUID(clusterUuid);
// uuid = DateraUtil.PROVIDER_NAME + "_" + cluster.getUuid() + "_" + storageVip
// + "_" + clusterAdminUsername + "_" + numReplicas + "_" + volPlacement;
uuid = DateraUtil.PROVIDER_NAME + "_" + clusterUuid + "_" + randomString;
s_logger.debug("Datera - Setting Datera cluster-wide primary storage uuid to " + uuid);
parameters.setPodId(podId);
parameters.setClusterId(clusterId);
HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
if (!isSupportedHypervisorType(hypervisorType)) {
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
}
}
// otherwise this is zone-wide primary storage: build the uuid from the zone uuid
else {
DataCenterVO zone = zoneDao.findById(zoneId);
String zoneUuid = zone.getUuid();
randomString = DateraUtil.generateUUID(zoneUuid);
// uuid = DateraUtil.PROVIDER_NAME + "_" + zone.getUuid() + "_" + storageVip +
// "_" + clusterAdminUsername + "_" + numReplicas + "_" + volPlacement;
uuid = DateraUtil.PROVIDER_NAME + "_" + zoneUuid + "_" + randomString;
s_logger.debug("Datera - Setting Datera zone-wide primary storage uuid to " + uuid);
}
if (capacityBytes == null || capacityBytes <= 0) {
throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
}
if (capacityIops == null || capacityIops <= 0) {
throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
}
if (domainName == null) {
domainName = "ROOT";
s_logger.debug("setting the domain to ROOT");
}
s_logger.debug("Datera - domainName: " + domainName);
parameters.setHost(storageVip);
parameters.setPort(storagePort);
parameters.setPath(DateraUtil.getModifiedUrl(url));
parameters.setType(StoragePoolType.Iscsi);
parameters.setUuid(uuid);
parameters.setZoneId(zoneId);
parameters.setName(storagePoolName);
parameters.setProviderName(providerName);
parameters.setManaged(true);
parameters.setCapacityBytes(capacityBytes);
parameters.setUsedBytes(0);
parameters.setCapacityIops(capacityIops);
parameters.setHypervisorType(HypervisorType.Any);
parameters.setTags(tags);
parameters.setDetails(details);
String managementVip = DateraUtil.getManagementVip(url);
int managementPort = DateraUtil.getManagementPort(url);
details.put(DateraUtil.MANAGEMENT_VIP, managementVip);
details.put(DateraUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
details.put(DateraUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
details.put(DateraUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
long lClusterDefaultMinIops = 100;
long lClusterDefaultMaxIops = 15000;
try {
String clusterDefaultMinIops = DateraUtil.getValue(DateraUtil.CLUSTER_DEFAULT_MIN_IOPS, url);
if (clusterDefaultMinIops != null && clusterDefaultMinIops.trim().length() > 0) {
lClusterDefaultMinIops = Long.parseLong(clusterDefaultMinIops);
}
} catch (NumberFormatException ex) {
s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS
+ ", using default value: " + lClusterDefaultMinIops + ". Exception: " + ex);
}
try {
String clusterDefaultMaxIops = DateraUtil.getValue(DateraUtil.CLUSTER_DEFAULT_MAX_IOPS, url);
if (clusterDefaultMaxIops != null && clusterDefaultMaxIops.trim().length() > 0) {
lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops);
}
} catch (NumberFormatException ex) {
s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS
+ ", using default value: " + lClusterDefaultMaxIops + ". Exception: " + ex);
}
if (lClusterDefaultMinIops > lClusterDefaultMaxIops) {
throw new CloudRuntimeException("The parameter '" + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS
+ "' must be less than or equal to the parameter '" + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "'.");
}
if (numReplicas < DateraUtil.MIN_NUM_REPLICAS || numReplicas > DateraUtil.MAX_NUM_REPLICAS) {
throw new CloudRuntimeException("The parameter '" + DateraUtil.NUM_REPLICAS + "' must be between "
+ DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "' and " + DateraUtil.MAX_NUM_REPLICAS);
}
details.put(DateraUtil.CLUSTER_DEFAULT_MIN_IOPS, String.valueOf(lClusterDefaultMinIops));
details.put(DateraUtil.CLUSTER_DEFAULT_MAX_IOPS, String.valueOf(lClusterDefaultMaxIops));
details.put(DateraUtil.NUM_REPLICAS, String.valueOf(DateraUtil.getNumReplicas(url)));
details.put(DateraUtil.VOL_PLACEMENT, String.valueOf(DateraUtil.getVolPlacement(url)));
details.put(DateraUtil.IP_POOL, String.valueOf(DateraUtil.getIpPool(url)));
return dataStoreHelper.createPrimaryDataStore(parameters);
}
@Override
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
return true; // should be ignored for zone-wide-only plug-ins like Datera's
}
@Override
public boolean attachCluster(DataStore datastore, ClusterScope scope) {
PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) datastore;
// check if there is at least one host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing,
primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(),
primaryDataStoreInfo.getDataCenterId());
if (allHosts.isEmpty()) {
storagePoolDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException(
"No host is up in cluster " + primaryDataStoreInfo.getClusterId() + " to associate the storage pool with");
}
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO host : allHosts) {
try {
_storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId());
poolHosts.add(host);
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e);
}
}
if (poolHosts.isEmpty()) {
s_logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '"
+ primaryDataStoreInfo.getClusterId() + "'.");
storagePoolDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException("Failed to access storage pool");
}
dataStoreHelper.attachCluster(datastore);
return true;
// throw new UnsupportedOperationException("Only Zone-wide scope is supported
// with the Datera Storage driver");
}
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
dataStoreHelper.attachZone(dataStore);
List<HostVO> xenServerHosts = _resourceMgr
.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId());
List<HostVO> vmWareServerHosts = _resourceMgr
.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId());
List<HostVO> kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM,
scope.getScopeId());
List<HostVO> hosts = new ArrayList<HostVO>();
hosts.addAll(xenServerHosts);
hosts.addAll(vmWareServerHosts);
hosts.addAll(kvmHosts);
for (HostVO host : hosts) {
try {
_storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
}
}
return true;
}
@Override
public boolean maintain(DataStore dataStore) {
storagePoolAutomation.maintain(dataStore);
dataStoreHelper.maintain(dataStore);
return true;
}
@Override
public boolean cancelMaintain(DataStore store) {
dataStoreHelper.cancelMaintain(store);
storagePoolAutomation.cancelMaintain(store);
return true;
}
@Override
public boolean deleteDataStore(DataStore store) {
List<SnapshotVO> lstSnapshots = _snapshotDao.listAll();
if (lstSnapshots != null) {
for (SnapshotVO snapshot : lstSnapshots) {
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(),
DateraUtil.STORAGE_POOL_ID);
// if this snapshot belongs to the storagePool that was passed in
if (snapshotDetails != null && snapshotDetails.getValue() != null
&& Long.parseLong(snapshotDetails.getValue()) == store.getId()) {
throw new CloudRuntimeException(
"This primary storage cannot be deleted because it currently contains one or more snapshots.");
}
}
}
return dataStoreHelper.deletePrimaryDataStore(store);
}
@Override
public boolean migrateToObjectStore(DataStore store) {
return false;
}
@Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
StoragePoolVO storagePoolVo = storagePoolDao.findById(storagePool.getId());
String strCapacityBytes = details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES);
Long capacityBytes = strCapacityBytes != null ? Long.parseLong(strCapacityBytes) : null;
if (capacityBytes != null) {
long usedBytes = _capacityMgr.getUsedBytes(storagePoolVo);
if (capacityBytes < usedBytes) {
throw new CloudRuntimeException(
"Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes");
}
}
String strCapacityIops = details.get(PrimaryDataStoreLifeCycle.CAPACITY_IOPS);
Long capacityIops = strCapacityIops != null ? Long.parseLong(strCapacityIops) : null;
if (capacityIops != null) {
long usedIops = _capacityMgr.getUsedIops(storagePoolVo);
if (capacityIops < usedIops) {
throw new CloudRuntimeException(
"Cannot reduce the number of IOPS for this storage pool as it would lead to an insufficient number of IOPS");
}
}
}
@Override
public void enableStoragePool(DataStore dataStore) {
dataStoreHelper.enable(dataStore);
}
@Override
public void disableStoragePool(DataStore dataStore) {
dataStoreHelper.disable(dataStore);
}
private HypervisorType getHypervisorTypeForCluster(long clusterId) {
ClusterVO cluster = _clusterDao.findById(clusterId);
if (cluster == null) {
throw new CloudRuntimeException("Cluster ID '" + clusterId + "' was not found in the database.");
}
return cluster.getHypervisorType();
}
private static boolean isSupportedHypervisorType(HypervisorType hypervisorType) {
return HypervisorType.XenServer.equals(hypervisorType) || HypervisorType.VMware.equals(hypervisorType)
|| HypervisorType.KVM.equals(hypervisorType);
}
private HypervisorType getHypervisorType(long hostId) {
HostVO host = _hostDao.findById(hostId);
if (host != null) {
return host.getHypervisorType();
}
return HypervisorType.None;
}
}
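
For orientation, here is a minimal sketch (not part of this commit) of the dsInfos map that initialize() consumes. In a running management server the map is assembled by the createStoragePool API call and the lifecycle bean is Spring-injected; every value below is hypothetical, and "Datera" is the assumed value of DateraUtil.PROVIDER_NAME.

import java.util.HashMap;
import java.util.Map;

// Zone-wide registration: podId and clusterId are both left null.
Map<String, Object> dsInfos = new HashMap<>();
dsInfos.put("url", "MVIP=192.168.22.100;SVIP=192.168.128.200;numReplicas=3;volPlacement=hybrid");
dsInfos.put("zoneId", 1L);
dsInfos.put("podId", null);
dsInfos.put("clusterId", null);
dsInfos.put("name", "datera-primary-1");
dsInfos.put("providerName", "Datera");
dsInfos.put("capacityBytes", 1099511627776L); // must be present and > 0
dsInfos.put("capacityIops", 50000L);          // must be present and > 0
dsInfos.put("tags", "datera");
dsInfos.put("details", new HashMap<String, String>()); // no "domainname" => defaults to ROOT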

View File: DateraHostListener.java

@@ -0,0 +1,342 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.provider;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.ModifyTargetsCommand;
import com.cloud.alert.AlertManager;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.DateraObject;
import org.apache.cloudstack.storage.datastore.util.DateraUtil;
import org.apache.log4j.Logger;
import javax.inject.Inject;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class DateraHostListener implements HypervisorHostListener {
private static final Logger s_logger = Logger.getLogger(DateraHostListener.class);
@Inject private AgentManager _agentMgr;
@Inject private AlertManager _alertMgr;
@Inject private ClusterDao _clusterDao;
@Inject private ClusterDetailsDao _clusterDetailsDao;
@Inject private DataStoreManager _dataStoreMgr;
@Inject private HostDao _hostDao;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
@Inject private StoragePoolHostDao storagePoolHostDao;
@Inject private VMInstanceDao _vmDao;
@Inject private VolumeDao _volumeDao;
@Override
public boolean hostAdded(long hostId) {
return true;
}
@Override
public boolean hostConnect(long hostId, long storagePoolId) {
HostVO host = _hostDao.findById(hostId);
if (host == null) {
s_logger.error("Failed to add host by HostListener as host was not found with id : " + hostId);
return false;
}
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
if (storagePoolHost == null) {
storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, "");
storagePoolHostDao.persist(storagePoolHost);
}
if (host.getHypervisorType().equals(HypervisorType.XenServer)) {
handleXenServer(host.getClusterId(), host.getId(), storagePoolId);
}
else if (host.getHypervisorType().equals(HypervisorType.KVM)) {
//handleKVM(host.getClusterId(), host.getId(), storagePoolId);
handleKVM(hostId, storagePoolId);
}
return true;
}
@Override
public boolean hostDisconnected(long hostId, long storagePoolId) {
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
if (storagePoolHost != null) {
storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId);
}
return true;
}
@Override
public boolean hostAboutToBeRemoved(long hostId) {
HostVO host = _hostDao.findById(hostId);
handleVMware(host, false);
return true;
}
@Override
public boolean hostRemoved(long hostId, long clusterId) {
ClusterVO clusterVO = _clusterDao.findById(clusterId);
HostVO hostVO = _hostDao.findByIdIncludingRemoved(hostId);
String initiatorName = DateraUtil.INITIATOR_PREFIX + "-" + hostVO.getUuid();
int lockTimeInSeconds = 5;
GlobalLock lock = GlobalLock.getInternLock(clusterVO.getUuid());
if (!lock.lock(lockTimeInSeconds)) {
String errMsg = "Couldn't lock the DB on the following string: " + clusterVO.getUuid();
s_logger.debug(errMsg);
throw new CloudRuntimeException(errMsg);
}
try {
List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(DateraUtil.PROVIDER_NAME);
if (storagePools != null && storagePools.size() > 0) {
for (StoragePoolVO storagePool : storagePools) {
ClusterDetailsVO clusterDetail = _clusterDetailsDao.findDetail(clusterId, DateraUtil.getInitiatorGroupKey(storagePool.getId()));
String initiatorGroupName = clusterDetail != null ? clusterDetail.getValue() : null;
if (initiatorGroupName != null && DateraUtil.hostSupport_iScsi(hostVO)) {
DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePool.getId(), _storagePoolDetailsDao);
DateraObject.Initiator initiator = DateraUtil.getInitiator(conn, hostVO.getStorageUrl());
DateraObject.InitiatorGroup initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName);
if (initiator != null && DateraUtil.isInitiatorPresentInGroup(initiator, initiatorGroup)) {
DateraUtil.removeInitiatorFromGroup(conn, initiator.getPath(), initiatorGroupName);
}
}
}
}
} catch (DateraObject.DateraError | UnsupportedEncodingException e) {
s_logger.warn("Error while removing host from initiator groups ", e);
} finally {
lock.unlock();
lock.releaseRef();
}
return true;
}
private void handleXenServer(long clusterId, long hostId, long storagePoolId) {
List<String> storagePaths = getStoragePaths(clusterId, storagePoolId);
StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
for (String storagePath : storagePaths) {
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
cmd.setStoragePath(storagePath);
sendModifyStoragePoolCommand(cmd, storagePool, hostId);
}
}
private void handleVMware(HostVO host, boolean add) {
if (HypervisorType.VMware.equals(host.getHypervisorType())) {
List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(DateraUtil.PROVIDER_NAME);
if (storagePools != null && storagePools.size() > 0) {
List<Map<String, String>> targets = new ArrayList<>();
for (StoragePoolVO storagePool : storagePools) {
List<Map<String, String>> targetsForClusterAndStoragePool = getTargets(host.getClusterId(), storagePool.getId());
targets.addAll(targetsForClusterAndStoragePool);
}
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
cmd.setAdd(add);
cmd.setTargets(targets);
sendModifyTargetsCommand(cmd, host.getId());
}
}
}
private void handleKVM(long clusterId, long hostId, long storagePoolId) {
List<String> storagePaths = getStoragePaths(clusterId, storagePoolId);
StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
for (String storagePath : storagePaths) {
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
cmd.setStoragePath(storagePath);
sendModifyStoragePoolCommand(cmd, storagePool, hostId);
}
}
private void handleKVM(long hostId, long storagePoolId) {
StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
sendModifyStoragePoolCommand(cmd, storagePool, hostId);
}
private List<String> getStoragePaths(long clusterId, long storagePoolId) {
List<String> storagePaths = new ArrayList<>();
// If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null);
if (volumes != null) {
for (VolumeVO volume : volumes) {
Long instanceId = volume.getInstanceId();
if (instanceId != null) {
VMInstanceVO vmInstance = _vmDao.findById(instanceId);
Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId();
if (hostIdForVm != null ) {
HostVO hostForVm = _hostDao.findById(hostIdForVm);
if (hostForVm.getClusterId().equals(clusterId)) {
storagePaths.add(volume.get_iScsiName());
}
}
}
}
}
return storagePaths;
}
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
Answer answer = _agentMgr.easySend(hostId, cmd);
if (answer == null) {
throw new CloudRuntimeException("Unable to get an answer to the modify targets command");
}
if (!answer.getResult()) {
String msg = "Unable to modify targets on the following host: " + hostId;
HostVO host = _hostDao.findById(hostId);
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg);
throw new CloudRuntimeException(msg);
}
}
private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) {
Answer answer = _agentMgr.easySend(hostId, cmd);
if (answer == null) {
throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")");
}
if (!answer.getResult()) {
String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId;
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() +
" (" + storagePool.getId() + ")");
}
assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected; Pool = " + storagePool.getId() + " Host = " + hostId;
s_logger.info("Connection established between storage pool " + storagePool + " and host " + hostId);
}
private List<Map<String, String>> getTargets(long clusterId, long storagePoolId) {
List<Map<String, String>> targets = new ArrayList<>();
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
// If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null);
if (volumes != null) {
for (VolumeVO volume : volumes) {
Long instanceId = volume.getInstanceId();
if (instanceId != null) {
VMInstanceVO vmInstance = _vmDao.findById(instanceId);
Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId();
if (hostIdForVm != null) {
HostVO hostForVm = _hostDao.findById(hostIdForVm);
if (hostForVm.getClusterId().equals(clusterId)) {
Map<String, String> details = new HashMap<>();
details.put(ModifyTargetsCommand.IQN, volume.get_iScsiName());
details.put(ModifyTargetsCommand.STORAGE_HOST, storagePool.getHostAddress());
details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));
targets.add(details);
}
}
}
}
}
return targets;
}
}
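
For reference, each entry that getTargets() hands to ModifyTargetsCommand is a small map keyed by the command's constants. A sketch (not part of this commit) with hypothetical values:

// One target entry as built in getTargets(); IQN and addresses are hypothetical.
Map<String, String> target = new HashMap<>();
target.put(ModifyTargetsCommand.IQN, "iqn.2013-05.com.daterainc:tc:01:sn:0123456789abcdef");
target.put(ModifyTargetsCommand.STORAGE_HOST, "192.168.128.200"); // storage pool host address
target.put(ModifyTargetsCommand.STORAGE_PORT, "3260");            // iSCSI port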

View File: DateraPrimaryDataStoreProvider.java

@@ -0,0 +1,83 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.provider;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.cloudstack.storage.datastore.driver.DateraPrimaryDataStoreDriver;
import org.apache.cloudstack.storage.datastore.lifecycle.DateraPrimaryDataStoreLifeCycle;
import org.apache.cloudstack.storage.datastore.util.DateraUtil;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import com.cloud.utils.component.ComponentContext;
@Component
public class DateraPrimaryDataStoreProvider implements PrimaryDataStoreProvider {
private DataStoreLifeCycle lifecycle;
private PrimaryDataStoreDriver driver;
private HypervisorHostListener listener;
DateraPrimaryDataStoreProvider() {
}
@Override
public String getName() {
return DateraUtil.PROVIDER_NAME;
}
@Override
public DataStoreLifeCycle getDataStoreLifeCycle() {
return lifecycle;
}
@Override
public PrimaryDataStoreDriver getDataStoreDriver() {
return driver;
}
@Override
public HypervisorHostListener getHostListener() {
return listener;
}
@Override
public boolean configure(Map<String, Object> params) {
lifecycle = ComponentContext.inject(DateraPrimaryDataStoreLifeCycle.class);
driver = ComponentContext.inject(DateraPrimaryDataStoreDriver.class);
listener = ComponentContext.inject(DateraHostListener.class);
return true;
}
@Override
public Set<DataStoreProviderType> getTypes() {
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
types.add(DataStoreProviderType.PRIMARY);
return types;
}
}
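
Once Spring instantiates the provider and configure() wires the lifecycle, driver, and host listener together, other storage-subsystem code resolves it by name. A minimal sketch (not part of this commit), assuming the standard DataStoreProviderManager lookup and field injection:

@Inject
private DataStoreProviderManager dataStoreProviderMgr;

void useDateraProvider() {
    DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider(DateraUtil.PROVIDER_NAME);
    PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver) provider.getDataStoreDriver();
    // the driver handles volume create/delete/clone operations for Datera-backed pools
}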

View File: DateraObject.java

@@ -0,0 +1,469 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.util;
import com.cloud.utils.StringUtils;
import com.google.gson.annotations.SerializedName;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class DateraObject {
public static final String DEFAULT_CREATE_MODE = "cloudstack";
public static final String DEFAULT_STORAGE_NAME = "storage-1";
public static final String DEFAULT_VOLUME_NAME = "volume-1";
public static final String DEFAULT_ACL = "deny_all";
public static final String DEFAULT_STORAGE_FORCE_BOOLEAN = "true";
public enum AppState {
ONLINE, OFFLINE;
@Override
public String toString() {
return this.name().toLowerCase();
}
}
public enum DateraOperation {
ADD, REMOVE;
@Override
public String toString() {
return this.name().toLowerCase();
}
}
public enum DateraErrorTypes {
PermissionDeniedError, InvalidRouteError, AuthFailedError, ValidationFailedError, InvalidRequestError,
NotFoundError, NotConnectedError, InvalidSessionKeyError, DatabaseError, InternalError, ConflictError;
public boolean equals(DateraError err) {
return this.name().equals(err.getName());
}
}
public static class DateraConnection {
private int managementPort;
private String managementIp;
private String username;
private String password;
public DateraConnection(String managementIp, int managementPort, String username, String password) {
this.managementPort = managementPort;
this.managementIp = managementIp;
this.username = username;
this.password = password;
}
public int getManagementPort() {
return managementPort;
}
public String getManagementIp() {
return managementIp;
}
public String getUsername() {
return username;
}
public String getPassword() {
return password;
}
}
public static class DateraLogin {
private final String name;
private final String password;
public DateraLogin(String username, String password) {
this.name = username;
this.password = password;
}
}
public static class DateraLoginResponse {
private String key;
public String getKey() {
return key;
}
}
public static class Access { // made static, consistent with the other nested types deserialized by Gson
private String iqn;
private List<String> ips;
public Access(String iqn, List<String> ips) {
this.iqn = iqn;
this.ips = ips;
}
public String getIqn() {
return iqn;
}
}
public static class PerformancePolicy {
@SerializedName("total_iops_max")
private Integer totalIops;
public PerformancePolicy(int totalIops) {
this.totalIops = totalIops;
}
public Integer getTotalIops() {
return totalIops;
}
}
public static class Volume {
private String name;
private String path;
private Integer size;
@SerializedName("replica_count")
private Integer replicaCount;
@SerializedName("performance_policy")
private PerformancePolicy performancePolicy;
@SerializedName("placement_mode")
private String placementMode;
@SerializedName("op_state")
private String opState;
public Volume(int size, int totalIops, int replicaCount) {
this.name = DEFAULT_VOLUME_NAME;
this.size = size;
this.replicaCount = replicaCount;
this.performancePolicy = new PerformancePolicy(totalIops);
}
public Volume(int size, int totalIops, int replicaCount, String placementMode) {
this.name = DEFAULT_VOLUME_NAME;
this.size = size;
this.replicaCount = replicaCount;
this.performancePolicy = new PerformancePolicy(totalIops);
this.placementMode = placementMode;
}
public Volume(Integer newSize) {
this.size = newSize;
}
public Volume(String newPlacementMode) {
this.placementMode = newPlacementMode;
}
public PerformancePolicy getPerformancePolicy() {
return performancePolicy;
}
public int getSize() {
return size;
}
public String getPlacementMode() {
return placementMode;
}
public String getPath() {
return path;
}
public String getOpState() {
return opState;
}
}
public static class StorageInstance {
private final String name = DEFAULT_STORAGE_NAME;
private Map<String, Volume> volumes;
private Access access;
private String force;
@SerializedName("ip_pool")
private String ipPool;
public StorageInstance(int size, int totalIops, int replicaCount) {
Volume volume = new Volume(size, totalIops, replicaCount);
volumes = new HashMap<String, Volume>();
volumes.put(DEFAULT_VOLUME_NAME, volume);
}
public StorageInstance(int size, int totalIops, int replicaCount, String placementMode, String ipPool) {
Volume volume = new Volume(size, totalIops, replicaCount, placementMode);
volumes = new HashMap<String, Volume>();
volumes.put(DEFAULT_VOLUME_NAME, volume);
this.ipPool = "/access_network_ip_pools/" + ipPool;
}
public StorageInstance(int size, int totalIops, int replicaCount, String placementMode, String ipPool, String force) {
Volume volume = new Volume(size, totalIops, replicaCount, placementMode);
volumes = new HashMap<String, Volume>();
volumes.put(DEFAULT_VOLUME_NAME, volume);
this.ipPool = "/access_network_ip_pools/" + ipPool;
this.force = DEFAULT_STORAGE_FORCE_BOOLEAN;
}
public Access getAccess() {
return access;
}
public Volume getVolume() {
return volumes.get(DEFAULT_VOLUME_NAME);
}
public int getSize() {
return getVolume().getSize();
}
public String getForce() {
return this.force;
}
}
public static class AppInstance {
private String name;
@SerializedName("access_control_mode")
private String accessControlMode;
@SerializedName("create_mode")
private String createMode;
@SerializedName("storage_instances")
private Map<String, StorageInstance> storageInstances;
@SerializedName("clone_src")
private String cloneSrc;
@SerializedName("admin_state")
private String adminState;
private Boolean force;
public AppInstance(String name, int size, int totalIops, int replicaCount) {
this.name = name;
StorageInstance storageInstance = new StorageInstance(size, totalIops, replicaCount);
this.storageInstances = new HashMap<String, StorageInstance>();
this.storageInstances.put(DEFAULT_STORAGE_NAME, storageInstance);
this.accessControlMode = DEFAULT_ACL;
this.createMode = DEFAULT_CREATE_MODE;
}
public AppInstance(String name, int size, int totalIops, int replicaCount, String placementMode,
String ipPool) {
this.name = name;
StorageInstance storageInstance = new StorageInstance(size, totalIops, replicaCount, placementMode, ipPool);
this.storageInstances = new HashMap<String, StorageInstance>();
this.storageInstances.put(DEFAULT_STORAGE_NAME, storageInstance);
this.accessControlMode = DEFAULT_ACL;
this.createMode = DEFAULT_CREATE_MODE;
}
public AppInstance(AppState state) {
this.adminState = state.toString();
this.force = true;
}
public AppInstance(String name, String cloneSrc) {
this.name = name;
this.cloneSrc = cloneSrc;
}
public String getIqn() {
StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME);
return storageInstance.getAccess().getIqn();
}
public int getTotalIops() {
StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME);
return storageInstance.getVolume().getPerformancePolicy().getTotalIops();
}
public String getName() {
return name;
}
public int getSize() {
StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME);
return storageInstance.getSize();
}
public String getVolumePath() {
StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME);
return storageInstance.getVolume().getPath();
}
public String getVolumeOpState() {
StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME);
return storageInstance.getVolume().getOpState();
}
}
public static class AccessNetworkIpPool {
@SerializedName("ip_pool")
private String ipPool;
public AccessNetworkIpPool(String ipPool) {
this.ipPool = "/access_network_ip_pools/" + ipPool;
}
}
public static class Initiator {
private String id; // IQN
private String name;
private String path;
private String op;
public Initiator(String name, String id) {
this.id = id;
this.name = name;
}
public Initiator(String path, DateraOperation op) {
this.path = path;
this.op = op.toString();
}
public String getPath() {
return path;
}
}
public static class InitiatorGroup {
private String name;
private List<String> members;
private String path;
private String op;
public InitiatorGroup(String name, List<String> members) {
this.name = name;
this.members = members;
}
public InitiatorGroup(String path, DateraOperation op) {
this.path = path;
this.op = op.toString();
}
public String getPath() {
return path;
}
public String getName() {
return name;
}
public List<String> getMembers() {
return members;
}
}
public static class VolumeSnapshot {
private String uuid;
private String timestamp;
private String path;
@SerializedName("op_state")
private String opState;
VolumeSnapshot(String uuid) {
this.uuid = uuid;
}
public String getTimestamp() {
return timestamp;
}
public String getOpState() {
return opState;
}
public String getPath() {
return path;
}
}
public static class VolumeSnapshotRestore {
@SerializedName("restore_point")
private String restorePoint;
VolumeSnapshotRestore(String restorePoint) {
this.restorePoint = restorePoint;
}
}
public static class DateraError extends Exception {
private String name;
private int code;
private List<String> errors;
private String message;
public DateraError(String name, int code, List<String> errors, String message) {
this.name = name;
this.code = code;
this.errors = errors;
this.message = message;
}
public List<String> getErrors() {
return errors;
}
public boolean isError() {
return message != null && name.endsWith("Error");
}
public String getMessage() {
String errMesg = name + "\n";
if (message != null) {
errMesg += message + "\n";
}
if (errors != null) {
errMesg += StringUtils.join(errors, "\n");
}
return errMesg;
}
public String getName() {
return name;
}
}
}
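
These nested types map directly onto the Datera REST API's JSON via Gson's @SerializedName annotations. A sketch (not part of this commit, values hypothetical) that serializes a new app instance to show the wire format:

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

Gson gson = new GsonBuilder().setPrettyPrinting().create();
DateraObject.AppInstance app =
        new DateraObject.AppInstance("CS-v-42", 8, 1000, 3, "hybrid", "default");
System.out.println(gson.toJson(app));
// Prints roughly:
// {
//   "name": "CS-v-42",
//   "access_control_mode": "deny_all",
//   "create_mode": "cloudstack",
//   "storage_instances": {
//     "storage-1": {
//       "name": "storage-1",
//       "volumes": {
//         "volume-1": {
//           "name": "volume-1",
//           "size": 8,
//           "replica_count": 3,
//           "performance_policy": { "total_iops_max": 1000 },
//           "placement_mode": "hybrid"
//         }
//       },
//       "ip_pool": "/access_network_ip_pools/default"
//     }
//   }
// }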

View File: module.properties (storage-volume-datera)

@@ -0,0 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
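# Declares this plugin as a child of the "storage" module so CloudStack's
# module-based Spring context loader discovers and starts it.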
name=storage-volume-datera
parent=storage

View File: Spring module context XML

@@ -0,0 +1,33 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context.xsd"
>
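<!-- Registers the Datera provider bean; the storage subsystem enumerates
     PrimaryDataStoreProvider beans at startup and calls configure() on each. -->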
<bean id="dateraDataStoreProvider"
class="org.apache.cloudstack.storage.datastore.provider.DateraPrimaryDataStoreProvider" />
</beans>

File diff suppressed because it is too large.