Mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)
KVM Host HA enhancement for StorPool storage (#8045)
Extends the current KVM Host HA functionality to the StorPool storage plugin and provides an easy integration point for the remaining storage plugins to support Host HA. The extension works like the existing NFS storage implementation: it can be used with NFS and StorPool storage simultaneously, or with StorPool primary storage only. When different primary storages such as NFS and StorPool are in use and a storage health check fails for one of them, the global setting kvm.ha.fence.on.storage.heartbeat.failure controls whether that failure is reported to the management server. The option is disabled by default; when enabled, the Host HA service continues its checks on the host and will eventually fence it.
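As a rough, standalone illustration (plain Java, not CloudStack code; the pool names and the heartbeatOk predicate are invented for the example) of how this setting is meant to gate the per-storage heartbeat loop that KVMHAChecker now runs: with the flag enabled, a single failed storage heartbeat already decides the check; with the default (disabled), the loop keeps going and the remaining storages still determine the outcome.

import java.util.List;
import java.util.function.Predicate;

// Standalone sketch of the gating behaviour of kvm.ha.fence.on.storage.heartbeat.failure.
public class StorageHeartbeatGateSketch {

    static boolean checkingHeartBeat(List<String> pools, Predicate<String> heartbeatOk,
            boolean fenceOnSingleStorageFailure) {
        boolean validResult = false;
        for (String pool : pools) {
            validResult = heartbeatOk.test(pool);            // per-pool heartbeat check
            if (fenceOnSingleStorageFailure && !validResult) {
                break;                                       // one failed storage already decides it
            }
        }
        return validResult;
    }

    public static void main(String[] args) {
        List<String> pools = List.of("nfs-pool", "storpool-pool");
        Predicate<String> nfsDown = pool -> !pool.startsWith("nfs"); // pretend the NFS heartbeat failed

        System.out.println(checkingHeartBeat(pools, nfsDown, true));  // false -> host check reported failed
        System.out.println(checkingHeartBeat(pools, nfsDown, false)); // true  -> last checked pool was healthy
    }
}

In the actual change, the per-pool check is delegated to KVMStoragePool.checkingHeartBeat(...), so each storage plugin can answer the heartbeat in its own way.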
This commit is contained in:
parent 0caf18bc1a
commit 2bb182c3e1
@@ -24,6 +24,7 @@ public class HostTO {
     private NetworkTO publicNetwork;
     private NetworkTO storageNetwork1;
     private NetworkTO storageNetwork2;
+    private String parent;
 
     protected HostTO() {
     }
@@ -40,6 +41,9 @@ public class HostTO {
         if (vo.getStorageIpAddressDeux() != null) {
             storageNetwork2 = new NetworkTO(vo.getStorageIpAddressDeux(), vo.getStorageNetmaskDeux(), vo.getStorageMacAddressDeux());
         }
+        if (vo.getParent() != null) {
+            parent = vo.getParent();
+        }
     }
 
     public String getGuid() {
@@ -81,4 +85,12 @@ public class HostTO {
     public void setStorageNetwork2(NetworkTO storageNetwork2) {
         this.storageNetwork2 = storageNetwork2;
     }
+
+    public String getParent() {
+        return parent;
+    }
+
+    public void setParent(String parent) {
+        this.parent = parent;
+    }
 }
@@ -24,6 +24,7 @@ import com.cloud.host.Host;
 
 public class CheckOnHostCommand extends Command {
     HostTO host;
+    boolean reportCheckFailureIfOneStorageIsDown;
 
     protected CheckOnHostCommand() {
     }
@@ -33,10 +34,20 @@ public class CheckOnHostCommand extends Command {
         setWait(20);
     }
 
+    public CheckOnHostCommand(Host host, boolean reportCheckFailureIfOneStorageIsDown) {
+        super();
+        this.host = new HostTO(host);
+        this.reportCheckFailureIfOneStorageIsDown = reportCheckFailureIfOneStorageIsDown;
+    }
+
     public HostTO getHost() {
         return host;
     }
 
+    public boolean isCheckFailedOnOneStorage() {
+        return reportCheckFailureIfOneStorageIsDown;
+    }
+
     @Override
     public boolean executeInSequence() {
         return false;
@@ -19,6 +19,7 @@
 
 package com.cloud.agent.api;
 
+import com.cloud.agent.api.to.HostTO;
 import com.cloud.host.Host;
 import com.cloud.vm.VirtualMachine;
 
@@ -32,6 +33,8 @@ public class FenceCommand extends Command {
     String hostGuid;
     String hostIp;
     boolean inSeq;
+    HostTO host;
+    boolean reportCheckFailureIfOneStorageIsDown;
 
     public FenceCommand(VirtualMachine vm, Host host) {
         super();
@@ -39,6 +42,7 @@ public class FenceCommand extends Command {
         hostGuid = host.getGuid();
         hostIp = host.getPrivateIpAddress();
         inSeq = false;
+        this.host = new HostTO(host);
     }
 
     public void setSeq(boolean inseq) {
@@ -61,4 +65,16 @@ public class FenceCommand extends Command {
     public boolean executeInSequence() {
         return inSeq;
     }
+
+    public HostTO getHost() {
+        return host;
+    }
+
+    public boolean isReportCheckFailureIfOneStorageIsDown() {
+        return reportCheckFailureIfOneStorageIsDown;
+    }
+
+    public void setReportCheckFailureIfOneStorageIsDown(boolean reportCheckFailureIfOneStorageIsDown) {
+        this.reportCheckFailureIfOneStorageIsDown = reportCheckFailureIfOneStorageIsDown;
+    }
 }
@@ -23,6 +23,8 @@ import org.apache.cloudstack.storage.command.CommandResult;
 
 import com.cloud.host.Host;
 import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.utils.Pair;
 
 public interface PrimaryDataStoreDriver extends DataStoreDriver {
@@ -132,4 +134,8 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
      * @param tagValue The value of the VM's tag
      */
     void provideVmTags(long vmId, long volumeId, String tagValue);
+
+    boolean isStorageSupportHA(StoragePoolType type);
+
+    void detachVolumeFromAllStorageNodes(Volume volume);
 }
@@ -72,6 +72,9 @@ public interface HighAvailabilityManager extends Manager {
             + " which are registered for the HA event that were successful and are now ready to be purged.",
             true, Cluster);
 
+    public static final ConfigKey<Boolean> KvmHAFenceHostIfHeartbeatFailsOnStorage = new ConfigKey<>("Advanced", Boolean.class, "kvm.ha.fence.on.storage.heartbeat.failure", "false",
+            "Proceed fencing the host even the heartbeat failed for only one storage pool", false, ConfigKey.Scope.Zone);
+
     public enum WorkType {
         Migration, // Migrating VMs off of a host.
         Stop, // Stops a VM for storage pool migration purposes. This should be obsolete now.
@@ -27,8 +27,12 @@ import com.cloud.host.Status;
 import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor;
 import com.cloud.resource.ResourceManager;
-import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.utils.component.AdapterBase;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
 import org.apache.cloudstack.ha.HAManager;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
@@ -49,6 +53,8 @@ public class KVMInvestigator extends AdapterBase implements Investigator {
     private PrimaryDataStoreDao _storagePoolDao;
     @Inject
     private HAManager haManager;
+    @Inject
+    private DataStoreProviderManager dataStoreProviderMgr;
 
     @Override
     public boolean isVmAlive(com.cloud.vm.VirtualMachine vm, Host host) throws UnknownVM {
@@ -78,23 +84,12 @@ public class KVMInvestigator extends AdapterBase implements Investigator {
         }
 
         List<StoragePoolVO> clusterPools = _storagePoolDao.listPoolsByCluster(agent.getClusterId());
-        boolean hasNfs = false;
-        for (StoragePoolVO pool : clusterPools) {
-            if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) {
-                hasNfs = true;
-                break;
-            }
-        }
-        if (!hasNfs) {
+        boolean storageSupportHA = storageSupportHa(clusterPools);
+        if (!storageSupportHA) {
             List<StoragePoolVO> zonePools = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(agent.getDataCenterId(), agent.getHypervisorType());
-            for (StoragePoolVO pool : zonePools) {
-                if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) {
-                    hasNfs = true;
-                    break;
-                }
-            }
+            storageSupportHA = storageSupportHa(zonePools);
         }
-        if (!hasNfs) {
+        if (!storageSupportHA) {
             s_logger.warn(
                     "Agent investigation was requested on host " + agent + ", but host does not support investigation because it has no NFS storage. Skipping investigation.");
             return Status.Disconnected;
@@ -102,7 +97,8 @@ public class KVMInvestigator extends AdapterBase implements Investigator {
 
         Status hostStatus = null;
         Status neighbourStatus = null;
-        CheckOnHostCommand cmd = new CheckOnHostCommand(agent);
+        boolean reportFailureIfOneStorageIsDown = HighAvailabilityManager.KvmHAFenceHostIfHeartbeatFailsOnStorage.value();
+        CheckOnHostCommand cmd = new CheckOnHostCommand(agent, reportFailureIfOneStorageIsDown);
 
         try {
             Answer answer = _agentMgr.easySend(agent.getId(), cmd);
@@ -145,4 +141,20 @@ public class KVMInvestigator extends AdapterBase implements Investigator {
         s_logger.debug("HA: HOST is ineligible legacy state " + hostStatus + " for host " + agent.getId());
         return hostStatus;
     }
+
+    private boolean storageSupportHa(List<StoragePoolVO> pools) {
+        boolean storageSupportHA = false;
+        for (StoragePoolVO pool : pools) {
+            DataStoreProvider storeProvider = dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
+            DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
+            if (storeDriver instanceof PrimaryDataStoreDriver) {
+                PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver;
+                if (primaryStoreDriver.isStorageSupportHA(pool.getPoolType())) {
+                    storageSupportHA = true;
+                    break;
+                }
+            }
+        }
+        return storageSupportHA;
+    }
 }
@@ -24,6 +24,7 @@ import org.libvirt.StoragePool;
 import org.libvirt.StoragePoolInfo;
 import org.libvirt.StoragePoolInfo.StoragePoolState;
 
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
 import com.cloud.utils.script.OutputInterpreter;
 import com.cloud.utils.script.OutputInterpreter.AllLinesParser;
 import com.cloud.utils.script.Script;
@@ -41,26 +42,76 @@ public class KVMHABase {
         PrimaryStorage, SecondaryStorage
     }
 
-    public static class NfsStoragePool {
-        String _poolUUID;
-        String _poolIp;
-        String _poolMountSourcePath;
-        String _mountDestPath;
-        PoolType _type;
+    public static class HAStoragePool {
+        String poolUuid;
+        String poolIp;
+        String poolMountSourcePath;
+        String mountDestPath;
+        PoolType poolType;
+        KVMStoragePool pool;
 
-        public NfsStoragePool(String poolUUID, String poolIp, String poolSourcePath, String mountDestPath, PoolType type) {
-            _poolUUID = poolUUID;
-            _poolIp = poolIp;
-            _poolMountSourcePath = poolSourcePath;
-            _mountDestPath = mountDestPath;
-            _type = type;
+        public HAStoragePool(KVMStoragePool pool, String host, String path, PoolType type) {
+            this.pool = pool;
+            this.poolUuid = pool.getUuid();
+            this.mountDestPath = pool.getLocalPath();
+            this.poolIp = host;
+            this.poolMountSourcePath = path;
+            this.poolType = type;
+        }
+
+        public String getPoolUUID() {
+            return poolUuid;
+        }
+
+        public void setPoolUUID(String poolUuid) {
+            this.poolUuid = poolUuid;
+        }
+
+        public String getPoolIp() {
+            return poolIp;
+        }
+
+        public void setPoolIp(String poolIp) {
+            this.poolIp = poolIp;
+        }
+
+        public String getPoolMountSourcePath() {
+            return poolMountSourcePath;
+        }
+
+        public void setPoolMountSourcePath(String poolMountSourcePath) {
+            this.poolMountSourcePath = poolMountSourcePath;
+        }
+
+        public String getMountDestPath() {
+            return mountDestPath;
+        }
+
+        public void setMountDestPath(String mountDestPath) {
+            this.mountDestPath = mountDestPath;
+        }
+
+        public PoolType getType() {
+            return poolType;
+        }
+
+        public void setType(PoolType type) {
+            this.poolType = type;
+        }
+
+        public KVMStoragePool getPool() {
+            return pool;
+        }
+
+        public void setPool(KVMStoragePool pool) {
+            this.pool = pool;
         }
     }
 
-    protected String checkingMountPoint(NfsStoragePool pool, String poolName) {
-        String mountSource = pool._poolIp + ":" + pool._poolMountSourcePath;
+    protected String checkingMountPoint(HAStoragePool pool, String poolName) {
+        String mountSource = pool.getPoolIp() + ":" + pool.getPoolMountSourcePath();
         String mountPaths = Script.runSimpleBashScript("cat /proc/mounts | grep " + mountSource);
-        String destPath = pool._mountDestPath;
+        String destPath = pool.getMountDestPath();
 
         if (mountPaths != null) {
             String token[] = mountPaths.split(" ");
@@ -100,12 +151,12 @@ public class KVMHABase {
         return destPath;
     }
 
-    protected String getMountPoint(NfsStoragePool storagePool) {
+    protected String getMountPoint(HAStoragePool storagePool) {
 
         StoragePool pool = null;
         String poolName = null;
         try {
-            pool = LibvirtConnection.getConnection().storagePoolLookupByUUIDString(storagePool._poolUUID);
+            pool = LibvirtConnection.getConnection().storagePoolLookupByUUIDString(storagePool.getPoolUUID());
             if (pool != null) {
                 StoragePoolInfo spi = pool.getInfo();
                 if (spi.state != StoragePoolState.VIR_STORAGE_POOL_RUNNING) {
@@ -22,18 +22,18 @@ import java.util.stream.Collectors;
 
 import org.apache.log4j.Logger;
 
-import com.cloud.utils.script.OutputInterpreter;
-import com.cloud.utils.script.Script;
+import com.cloud.agent.api.to.HostTO;
 
 public class KVMHAChecker extends KVMHABase implements Callable<Boolean> {
     private static final Logger s_logger = Logger.getLogger(KVMHAChecker.class);
-    private List<NfsStoragePool> nfsStoragePools;
-    private String hostIp;
-    private long heartBeatCheckerTimeout = 360000; // 6 minutes
+    private List<HAStoragePool> storagePools;
+    private HostTO host;
+    private boolean reportFailureIfOneStorageIsDown;
 
-    public KVMHAChecker(List<NfsStoragePool> pools, String host) {
-        this.nfsStoragePools = pools;
-        this.hostIp = host;
+    public KVMHAChecker(List<HAStoragePool> pools, HostTO host, boolean reportFailureIfOneStorageIsDown) {
+        this.storagePools = pools;
+        this.host = host;
+        this.reportFailureIfOneStorageIsDown = reportFailureIfOneStorageIsDown;
     }
 
     /*
@@ -44,30 +44,14 @@ public class KVMHAChecker extends KVMHABase implements Callable<Boolean> {
     public Boolean checkingHeartBeat() {
         boolean validResult = false;
 
-        String hostAndPools = String.format("host IP [%s] in pools [%s]", hostIp, nfsStoragePools.stream().map(pool -> pool._poolIp).collect(Collectors.joining(", ")));
+        String hostAndPools = String.format("host IP [%s] in pools [%s]", host.getPrivateNetwork().getIp(), storagePools.stream().map(pool -> pool.getPoolUUID()).collect(Collectors.joining(", ")));
 
         s_logger.debug(String.format("Checking heart beat with KVMHAChecker for %s", hostAndPools));
 
-        for (NfsStoragePool pool : nfsStoragePools) {
-            Script cmd = new Script(s_heartBeatPath, heartBeatCheckerTimeout, s_logger);
-            cmd.add("-i", pool._poolIp);
-            cmd.add("-p", pool._poolMountSourcePath);
-            cmd.add("-m", pool._mountDestPath);
-            cmd.add("-h", hostIp);
-            cmd.add("-r");
-            cmd.add("-t", String.valueOf(_heartBeatUpdateFreq / 1000));
-            OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
-            String result = cmd.execute(parser);
-            String parsedLine = parser.getLine();
-
-            s_logger.debug(String.format("Checking heart beat with KVMHAChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine,
-                    pool._poolIp));
-
-            if (result == null && parsedLine.contains("DEAD")) {
-                s_logger.warn(String.format("Checking heart beat with KVMHAChecker command [%s] returned [%s]. [%s]. It may cause a shutdown of host IP [%s].", cmd.toString(),
-                        result, parsedLine, hostIp));
-            } else {
-                validResult = true;
+        for (HAStoragePool pool : storagePools) {
+            validResult = pool.getPool().checkingHeartBeat(pool, host);
+            if (reportFailureIfOneStorageIsDown && !validResult) {
+                break;
             }
         }
 
@@ -18,6 +18,7 @@ package com.cloud.hypervisor.kvm.resource;
 
 import com.cloud.agent.properties.AgentProperties;
 import com.cloud.agent.properties.AgentPropertiesFileHandler;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.utils.script.Script;
 import org.apache.log4j.Logger;
 import org.libvirt.Connect;
@@ -35,14 +36,14 @@ import java.util.concurrent.ConcurrentHashMap;
 public class KVMHAMonitor extends KVMHABase implements Runnable {
 
     private static final Logger s_logger = Logger.getLogger(KVMHAMonitor.class);
-    private final Map<String, NfsStoragePool> storagePool = new ConcurrentHashMap<>();
+    private final Map<String, HAStoragePool> storagePool = new ConcurrentHashMap<>();
     private final boolean rebootHostAndAlertManagementOnHeartbeatTimeout;
 
     private final String hostPrivateIp;
 
-    public KVMHAMonitor(NfsStoragePool pool, String host, String scriptPath) {
+    public KVMHAMonitor(HAStoragePool pool, String host, String scriptPath) {
         if (pool != null) {
-            storagePool.put(pool._poolUUID, pool);
+            storagePool.put(pool.getPoolUUID(), pool);
         }
         hostPrivateIp = host;
         configureHeartBeatPath(scriptPath);
@@ -55,29 +56,29 @@ public class KVMHAMonitor extends KVMHABase implements Runnable {
         KVMHABase.s_heartBeatPath = scriptPath;
     }
 
-    public void addStoragePool(NfsStoragePool pool) {
+    public void addStoragePool(HAStoragePool pool) {
         synchronized (storagePool) {
-            storagePool.put(pool._poolUUID, pool);
+            storagePool.put(pool.getPoolUUID(), pool);
         }
     }
 
     public void removeStoragePool(String uuid) {
         synchronized (storagePool) {
-            NfsStoragePool pool = storagePool.get(uuid);
+            HAStoragePool pool = storagePool.get(uuid);
             if (pool != null) {
-                Script.runSimpleBashScript("umount " + pool._mountDestPath);
+                Script.runSimpleBashScript("umount " + pool.getMountDestPath());
                 storagePool.remove(uuid);
             }
         }
     }
 
-    public List<NfsStoragePool> getStoragePools() {
+    public List<HAStoragePool> getStoragePools() {
         synchronized (storagePool) {
             return new ArrayList<>(storagePool.values());
         }
     }
 
-    public NfsStoragePool getStoragePool(String uuid) {
+    public HAStoragePool getStoragePool(String uuid) {
         synchronized (storagePool) {
             return storagePool.get(uuid);
         }
@@ -87,86 +88,72 @@ public class KVMHAMonitor extends KVMHABase implements Runnable {
         synchronized (storagePool) {
             Set<String> removedPools = new HashSet<>();
             for (String uuid : storagePool.keySet()) {
-                NfsStoragePool primaryStoragePool = storagePool.get(uuid);
-                StoragePool storage;
-                try {
-                    Connect conn = LibvirtConnection.getConnection();
-                    storage = conn.storagePoolLookupByUUIDString(uuid);
-                    if (storage == null || storage.getInfo().state != StoragePoolState.VIR_STORAGE_POOL_RUNNING) {
-                        if (storage == null) {
-                            s_logger.debug(String.format("Libvirt storage pool [%s] not found, removing from HA list.", uuid));
-                        } else {
-                            s_logger.debug(String.format("Libvirt storage pool [%s] found, but not running, removing from HA list.", uuid));
-                        }
-
-                        removedPools.add(uuid);
+                HAStoragePool primaryStoragePool = storagePool.get(uuid);
+                if (primaryStoragePool.getPool().getType() == StoragePoolType.NetworkFilesystem) {
+                    checkForNotExistingPools(removedPools, uuid);
+                    if (removedPools.contains(uuid)) {
                         continue;
                     }
-
-                    s_logger.debug(String.format("Found NFS storage pool [%s] in libvirt, continuing.", uuid));
-
-                } catch (LibvirtException e) {
-                    s_logger.debug(String.format("Failed to lookup libvirt storage pool [%s].", uuid), e);
-
-                    if (e.toString().contains("pool not found")) {
-                        s_logger.debug(String.format("Removing pool [%s] from HA monitor since it was deleted.", uuid));
-                        removedPools.add(uuid);
-                        continue;
-                    }
-
                 }
 
                 String result = null;
-                for (int i = 1; i <= _heartBeatUpdateMaxTries; i++) {
-                    Script cmd = createHeartBeatCommand(primaryStoragePool, hostPrivateIp, true);
-                    result = cmd.execute();
-
-                    s_logger.debug(String.format("The command (%s), to the pool [%s], has the result [%s].", cmd.toString(), uuid, result));
-
-                    if (result != null) {
-                        s_logger.warn(String.format("Write heartbeat for pool [%s] failed: %s; try: %s of %s.", uuid, result, i, _heartBeatUpdateMaxTries));
-                        try {
-                            Thread.sleep(_heartBeatUpdateRetrySleep);
-                        } catch (InterruptedException e) {
-                            s_logger.debug("[IGNORED] Interrupted between heartbeat retries.", e);
-                        }
-                    } else {
-                        break;
-                    }
-
-                }
-
+                result = executePoolHeartBeatCommand(uuid, primaryStoragePool, result);
                 if (result != null && rebootHostAndAlertManagementOnHeartbeatTimeout) {
                     s_logger.warn(String.format("Write heartbeat for pool [%s] failed: %s; stopping cloudstack-agent.", uuid, result));
-                    Script cmd = createHeartBeatCommand(primaryStoragePool, null, false);
-                    result = cmd.execute();
+                    primaryStoragePool.getPool().createHeartBeatCommand(primaryStoragePool, null, false);;
                 }
             }
 
             if (!removedPools.isEmpty()) {
                 for (String uuid : removedPools) {
                     removeStoragePool(uuid);
                 }
             }
         }
 
     }
 
-    private Script createHeartBeatCommand(NfsStoragePool primaryStoragePool, String hostPrivateIp, boolean hostValidation) {
-        Script cmd = new Script(s_heartBeatPath, _heartBeatUpdateTimeout, s_logger);
-        cmd.add("-i", primaryStoragePool._poolIp);
-        cmd.add("-p", primaryStoragePool._poolMountSourcePath);
-        cmd.add("-m", primaryStoragePool._mountDestPath);
-
-        if (hostValidation) {
-            cmd.add("-h", hostPrivateIp);
+    private String executePoolHeartBeatCommand(String uuid, HAStoragePool primaryStoragePool, String result) {
+        for (int i = 1; i <= _heartBeatUpdateMaxTries; i++) {
+            result = primaryStoragePool.getPool().createHeartBeatCommand(primaryStoragePool, hostPrivateIp, true);
+            if (result != null) {
+                s_logger.warn(String.format("Write heartbeat for pool [%s] failed: %s; try: %s of %s.", uuid, result, i, _heartBeatUpdateMaxTries));
+                try {
+                    Thread.sleep(_heartBeatUpdateRetrySleep);
+                } catch (InterruptedException e) {
+                    s_logger.debug("[IGNORED] Interrupted between heartbeat retries.", e);
+                }
+            } else {
+                break;
+            }
         }
+        return result;
+    }
 
-        if (!hostValidation) {
-            cmd.add("-c");
+    private void checkForNotExistingPools(Set<String> removedPools, String uuid) {
+        try {
+            Connect conn = LibvirtConnection.getConnection();
+            StoragePool storage = conn.storagePoolLookupByUUIDString(uuid);
+            if (storage == null || storage.getInfo().state != StoragePoolState.VIR_STORAGE_POOL_RUNNING) {
+                if (storage == null) {
+                    s_logger.debug(String.format("Libvirt storage pool [%s] not found, removing from HA list.", uuid));
+                } else {
+                    s_logger.debug(String.format("Libvirt storage pool [%s] found, but not running, removing from HA list.", uuid));
+                }
+
+                removedPools.add(uuid);
+            }
+
+            s_logger.debug(String.format("Found NFS storage pool [%s] in libvirt, continuing.", uuid));
+
+        } catch (LibvirtException e) {
+            s_logger.debug(String.format("Failed to lookup libvirt storage pool [%s].", uuid), e);
+
+            if (e.toString().contains("pool not found")) {
+                s_logger.debug(String.format("Removing pool [%s] from HA monitor since it was deleted.", uuid));
+                removedPools.add(uuid);
+            }
         }
 
-        return cmd;
     }
 
     @Override
@@ -16,54 +16,31 @@
 // under the License.
 package com.cloud.hypervisor.kvm.resource;
 
-import com.cloud.utils.script.OutputInterpreter;
-import com.cloud.utils.script.Script;
-import org.apache.log4j.Logger;
+import com.cloud.agent.api.to.HostTO;
 import org.joda.time.Duration;
 
 import java.util.concurrent.Callable;
 
 public class KVMHAVMActivityChecker extends KVMHABase implements Callable<Boolean> {
-    private static final Logger LOG = Logger.getLogger(KVMHAVMActivityChecker.class);
-
-    final private NfsStoragePool nfsStoragePool;
-    final private String hostIP;
-    final private String volumeUuidList;
-    final private String vmActivityCheckPath;
-    final private Duration activityScriptTimeout = Duration.standardSeconds(3600L);
-    final private long suspectTimeInSeconds;
+    private final HAStoragePool storagePool;
+    private final String volumeUuidList;
+    private final String vmActivityCheckPath;
+    private final Duration activityScriptTimeout = Duration.standardSeconds(3600L);
+    private final long suspectTimeInSeconds;
+    private final HostTO host;
 
-    public KVMHAVMActivityChecker(final NfsStoragePool pool, final String host, final String volumeUUIDListString, String vmActivityCheckPath, final long suspectTime) {
-        this.nfsStoragePool = pool;
-        this.hostIP = host;
+    public KVMHAVMActivityChecker(final HAStoragePool pool, final HostTO host, final String volumeUUIDListString, String vmActivityCheckPath, final long suspectTime) {
+        this.storagePool = pool;
         this.volumeUuidList = volumeUUIDListString;
         this.vmActivityCheckPath = vmActivityCheckPath;
         this.suspectTimeInSeconds = suspectTime;
+        this.host = host;
     }
 
     @Override
     public Boolean checkingHeartBeat() {
-        Script cmd = new Script(vmActivityCheckPath, activityScriptTimeout.getStandardSeconds(), LOG);
-        cmd.add("-i", nfsStoragePool._poolIp);
-        cmd.add("-p", nfsStoragePool._poolMountSourcePath);
-        cmd.add("-m", nfsStoragePool._mountDestPath);
-        cmd.add("-h", hostIP);
-        cmd.add("-u", volumeUuidList);
-        cmd.add("-t", String.valueOf(String.valueOf(System.currentTimeMillis() / 1000)));
-        cmd.add("-d", String.valueOf(suspectTimeInSeconds));
-        OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
-
-        String result = cmd.execute(parser);
-        String parsedLine = parser.getLine();
-
-        LOG.debug(String.format("Checking heart beat with KVMHAVMActivityChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine, nfsStoragePool._poolIp));
-
-        if (result == null && parsedLine.contains("DEAD")) {
-            LOG.warn(String.format("Checking heart beat with KVMHAVMActivityChecker command [%s] returned [%s]. It is [%s]. It may cause a shutdown of host IP [%s].", cmd.toString(), result, parsedLine, hostIP));
-            return false;
-        } else {
-            return true;
-        }
+        return this.storagePool.getPool().vmActivityCheck(storagePool, host, activityScriptTimeout, volumeUuidList, vmActivityCheckPath, suspectTimeInSeconds);
     }
 
     @Override
@@ -28,8 +28,7 @@ import java.util.concurrent.Future;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.CheckOnHostCommand;
 import com.cloud.agent.api.to.HostTO;
-import com.cloud.agent.api.to.NetworkTO;
-import com.cloud.hypervisor.kvm.resource.KVMHABase.NfsStoragePool;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.hypervisor.kvm.resource.KVMHAChecker;
 import com.cloud.hypervisor.kvm.resource.KVMHAMonitor;
 import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
@@ -44,10 +43,9 @@ public final class LibvirtCheckOnHostCommandWrapper extends CommandWrapper<Check
         final ExecutorService executors = Executors.newSingleThreadExecutor();
         final KVMHAMonitor monitor = libvirtComputingResource.getMonitor();
 
-        final List<NfsStoragePool> pools = monitor.getStoragePools();
+        final List<HAStoragePool> pools = monitor.getStoragePools();
         final HostTO host = command.getHost();
-        final NetworkTO privateNetwork = host.getPrivateNetwork();
-        final KVMHAChecker ha = new KVMHAChecker(pools, privateNetwork.getIp());
+        final KVMHAChecker ha = new KVMHAChecker(pools, host, command.isCheckFailedOnOneStorage());
 
         final Future<Boolean> future = executors.submit(ha);
         try {
@@ -22,13 +22,14 @@ package com.cloud.hypervisor.kvm.resource.wrapper;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.CheckVMActivityOnStoragePoolCommand;
 import com.cloud.agent.api.to.StorageFilerTO;
-import com.cloud.hypervisor.kvm.resource.KVMHABase.NfsStoragePool;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
 import com.cloud.hypervisor.kvm.resource.KVMHAMonitor;
 import com.cloud.hypervisor.kvm.resource.KVMHAVMActivityChecker;
 import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
 import com.cloud.resource.CommandWrapper;
 import com.cloud.resource.ResourceWrapper;
-import com.cloud.storage.Storage;
 
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -43,9 +44,13 @@ public final class LibvirtCheckVMActivityOnStoragePoolCommandWrapper extends Com
         final ExecutorService executors = Executors.newSingleThreadExecutor();
         final KVMHAMonitor monitor = libvirtComputingResource.getMonitor();
         final StorageFilerTO pool = command.getPool();
-        if (Storage.StoragePoolType.NetworkFilesystem == pool.getType()){
-            final NfsStoragePool nfspool = monitor.getStoragePool(pool.getUuid());
-            final KVMHAVMActivityChecker ha = new KVMHAVMActivityChecker(nfspool, command.getHost().getPrivateNetwork().getIp(), command.getVolumeList(), libvirtComputingResource.getVmActivityCheckPath(), command.getSuspectTimeInSeconds());
+        final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+        KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid());
+
+        if (primaryPool.isPoolSupportHA()){
+            final HAStoragePool nfspool = monitor.getStoragePool(pool.getUuid());
+            final KVMHAVMActivityChecker ha = new KVMHAVMActivityChecker(nfspool, command.getHost(), command.getVolumeList(), libvirtComputingResource.getVmActivityCheckPath(), command.getSuspectTimeInSeconds());
             final Future<Boolean> future = executors.submit(ha);
             try {
                 final Boolean result = future.get();
@@ -30,7 +30,7 @@ import org.apache.log4j.Logger;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.FenceAnswer;
 import com.cloud.agent.api.FenceCommand;
-import com.cloud.hypervisor.kvm.resource.KVMHABase.NfsStoragePool;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.hypervisor.kvm.resource.KVMHAChecker;
 import com.cloud.hypervisor.kvm.resource.KVMHAMonitor;
 import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
@@ -47,7 +47,7 @@ public final class LibvirtFenceCommandWrapper extends CommandWrapper<FenceComman
         final ExecutorService executors = Executors.newSingleThreadExecutor();
         final KVMHAMonitor monitor = libvirtComputingResource.getMonitor();
 
-        final List<NfsStoragePool> pools = monitor.getStoragePools();
+        final List<HAStoragePool> pools = monitor.getStoragePools();
 
         /**
          * We can only safely fence off hosts when we use NFS
@@ -60,7 +60,7 @@ public final class LibvirtFenceCommandWrapper extends CommandWrapper<FenceComman
             return new FenceAnswer(command, false, logline);
         }
 
-        final KVMHAChecker ha = new KVMHAChecker(pools, command.getHostIp());
+        final KVMHAChecker ha = new KVMHAChecker(pools, command.getHost(), command.isReportCheckFailureIfOneStorageIsDown());
 
         final Future<Boolean> future = executors.submit(ha);
         try {
@@ -22,7 +22,10 @@ import java.util.Map;
 import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
+import org.joda.time.Duration;
 
+import com.cloud.agent.api.to.HostTO;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
 
@@ -183,4 +186,35 @@ public class IscsiAdmStoragePool implements KVMStoragePool {
         return new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("path", getLocalPath()).toString();
     }
+
+    @Override
+    public boolean isPoolSupportHA() {
+        return false;
+    }
+
+    @Override
+    public String getHearthBeatPath() {
+        return null;
+    }
+
+    @Override
+    public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp,
+            boolean hostValidation) {
+        return null;
+    }
+
+    @Override
+    public String getStorageNodeId() {
+        return null;
+    }
+
+    @Override
+    public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
+        return null;
+    }
+
+    @Override
+    public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, String volumeUUIDListString, String vmActivityCheckPath, long duration) {
+        return null;
+    }
+
 }
@@ -20,11 +20,22 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+import org.joda.time.Duration;
+
+import com.cloud.agent.api.to.HostTO;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
 
 public interface KVMStoragePool {
 
+    public static final long HeartBeatUpdateTimeout = 60000;
+    public static final long HeartBeatUpdateFreq = 60000;
+    public static final long HeartBeatUpdateMaxTries = 5;
+    public static final long HeartBeatUpdateRetrySleep = 10000;
+    public static final long HeartBeatCheckerTimeout = 360000; // 6 minutes
+
     public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase);
 
     public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size, byte[] passphrase);
@@ -74,4 +85,16 @@ public interface KVMStoragePool {
     public boolean supportsConfigDriveIso();
 
     public Map<String, String> getDetails();
+
+    public boolean isPoolSupportHA();
+
+    public String getHearthBeatPath();
+
+    public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp, boolean hostValidation);
+
+    public String getStorageNodeId();
+
+    public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host);
+
+    public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, String volumeUUIDListString, String vmActivityCheckPath, long duration);
 }
@@ -360,9 +360,9 @@ public class KVMStoragePoolManager {
         KVMStoragePool pool = adaptor.createStoragePool(name, host, port, path, userInfo, type, details);
 
         // LibvirtStorageAdaptor-specific statement
-        if (type == StoragePoolType.NetworkFilesystem && primaryStorage) {
-            KVMHABase.NfsStoragePool nfspool = new KVMHABase.NfsStoragePool(pool.getUuid(), host, path, pool.getLocalPath(), PoolType.PrimaryStorage);
-            _haMonitor.addStoragePool(nfspool);
+        if (pool.isPoolSupportHA() && primaryStorage) {
+            KVMHABase.HAStoragePool storagePool = new KVMHABase.HAStoragePool(pool, host, path, PoolType.PrimaryStorage);
+            _haMonitor.addStoragePool(storagePool);
         }
         StoragePoolInformation info = new StoragePoolInformation(name, host, port, path, userInfo, type, details, primaryStorage);
         addStoragePool(pool.getUuid(), info);
@@ -72,6 +72,7 @@ import java.util.HashSet;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+
 public class LibvirtStorageAdaptor implements StorageAdaptor {
     private static final Logger s_logger = Logger.getLogger(LibvirtStorageAdaptor.class);
     private StorageLayer _storageLayer;
@@ -21,15 +21,22 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.log4j.Logger;
+import org.joda.time.Duration;
 import org.libvirt.StoragePool;
 
 import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
 
+import com.cloud.agent.api.to.HostTO;
+import com.cloud.agent.properties.AgentProperties;
+import com.cloud.agent.properties.AgentPropertiesFileHandler;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.script.OutputInterpreter;
+import com.cloud.utils.script.Script;
 
 public class LibvirtStoragePool implements KVMStoragePool {
     private static final Logger s_logger = Logger.getLogger(LibvirtStoragePool.class);
@@ -287,8 +294,96 @@ public class LibvirtStoragePool implements KVMStoragePool {
         return null;
     }
 
+    @Override
+    public boolean isPoolSupportHA() {
+        return type == StoragePoolType.NetworkFilesystem;
+    }
+
+    public String getHearthBeatPath() {
+        if (type == StoragePoolType.NetworkFilesystem) {
+            String kvmScriptsDir = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_SCRIPTS_DIR);
+            return Script.findScript(kvmScriptsDir, "kvmheartbeat.sh");
+        }
+        return null;
+    }
+
+    public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp, boolean hostValidation) {
+        Script cmd = new Script(primaryStoragePool.getPool().getHearthBeatPath(), HeartBeatUpdateTimeout, s_logger);
+        cmd.add("-i", primaryStoragePool.getPoolIp());
+        cmd.add("-p", primaryStoragePool.getPoolMountSourcePath());
+        cmd.add("-m", primaryStoragePool.getMountDestPath());
+
+        if (hostValidation) {
+            cmd.add("-h", hostPrivateIp);
+        }
+
+        if (!hostValidation) {
+            cmd.add("-c");
+        }
+
+        return cmd.execute();
+    }
+
     @Override
     public String toString() {
         return new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("path", getLocalPath()).toString();
     }
+
+    @Override
+    public String getStorageNodeId() {
+        return null;
+    }
+
+    @Override
+    public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
+        boolean validResult = false;
+        String hostIp = host.getPrivateNetwork().getIp();
+        Script cmd = new Script(getHearthBeatPath(), HeartBeatCheckerTimeout, s_logger);
+        cmd.add("-i", pool.getPoolIp());
+        cmd.add("-p", pool.getPoolMountSourcePath());
+        cmd.add("-m", pool.getMountDestPath());
+        cmd.add("-h", hostIp);
+        cmd.add("-r");
+        cmd.add("-t", String.valueOf(HeartBeatUpdateFreq / 1000));
+        OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
+        String result = cmd.execute(parser);
+        String parsedLine = parser.getLine();
+
+        s_logger.debug(String.format("Checking heart beat with KVMHAChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine,
+                pool.getPoolIp()));
+
+        if (result == null && parsedLine.contains("DEAD")) {
+            s_logger.warn(String.format("Checking heart beat with KVMHAChecker command [%s] returned [%s]. [%s]. It may cause a shutdown of host IP [%s].", cmd.toString(),
+                    result, parsedLine, hostIp));
+        } else {
+            validResult = true;
+        }
+        return validResult;
+    }
+
+    @Override
+    public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, String volumeUUIDListString, String vmActivityCheckPath, long duration) {
+        Script cmd = new Script(vmActivityCheckPath, activityScriptTimeout.getStandardSeconds(), s_logger);
+        cmd.add("-i", pool.getPoolIp());
+        cmd.add("-p", pool.getPoolMountSourcePath());
+        cmd.add("-m", pool.getMountDestPath());
+        cmd.add("-h", host.getPrivateNetwork().getIp());
+        cmd.add("-u", volumeUUIDListString);
+        cmd.add("-t", String.valueOf(String.valueOf(System.currentTimeMillis() / 1000)));
+        cmd.add("-d", String.valueOf(duration));
+        OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
+
+        String result = cmd.execute(parser);
+        String parsedLine = parser.getLine();
+
+        s_logger.debug(String.format("Checking heart beat with KVMHAVMActivityChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine, pool.getPoolIp()));
+
+        if (result == null && parsedLine.contains("DEAD")) {
+            s_logger.warn(String.format("Checking heart beat with KVMHAVMActivityChecker command [%s] returned [%s]. It is [%s]. It may cause a shutdown of host IP [%s].", cmd.toString(), result, parsedLine, host.getPrivateNetwork().getIp()));
+            return false;
+        } else {
+            return true;
+        }
+    }
 }
@@ -23,7 +23,10 @@ import java.util.Map;
 import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
 import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
 import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.joda.time.Duration;
 
+import com.cloud.agent.api.to.HostTO;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.storage.Storage;
 
 public class ScaleIOStoragePool implements KVMStoragePool {
@@ -205,4 +208,35 @@ public class ScaleIOStoragePool implements KVMStoragePool {
     public Map<String, String> getDetails() {
         return this.details;
     }
+
+    @Override
+    public boolean isPoolSupportHA() {
+        return false;
+    }
+
+    @Override
+    public String getHearthBeatPath() {
+        return null;
+    }
+
+    @Override
+    public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp,
+            boolean hostValidation) {
+        return null;
+    }
+
+    @Override
+    public String getStorageNodeId() {
+        return null;
+    }
+
+    @Override
+    public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
+        return null;
+    }
+
+    @Override
+    public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, String volumeUUIDListString, String vmActivityCheckPath, long duration) {
+        return null;
+    }
 }
@@ -86,6 +86,7 @@ public final class KVMHAProvider extends HAAbstractHostProvider implements HAPro
 
     @Override
     public boolean fence(Host r) throws HAFenceException {
+
         try {
             if (outOfBandManagementService.isOutOfBandManagementEnabled(r)){
                 final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.OFF, null);
@@ -96,7 +97,7 @@ public final class KVMHAProvider extends HAAbstractHostProvider implements HAPro
             }
         } catch (Exception e){
             LOG.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage());
-            throw new HAFenceException("OOBM service is not configured or enabled for this host " + r.getName() , e);
+            throw new HAFenceException("OBM service is not configured or enabled for this host " + r.getName() , e);
         }
     }
 
@@ -151,7 +152,7 @@ public final class KVMHAProvider extends HAAbstractHostProvider implements HAPro
                 KVMHAConfig.KvmHAActivityCheckFailureThreshold,
                 KVMHAConfig.KvmHADegradedMaxPeriod,
                 KVMHAConfig.KvmHARecoverWaitPeriod,
-                KVMHAConfig.KvmHARecoverAttemptThreshold
+                KVMHAConfig.KvmHARecoverAttemptThreshold,
         };
     }
 }
@@ -22,6 +22,7 @@ import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.CheckOnHostCommand;
 import com.cloud.agent.api.CheckVMActivityOnStoragePoolCommand;
 import com.cloud.exception.StorageUnavailableException;
+import com.cloud.ha.HighAvailabilityManager;
 import com.cloud.host.Host;
 import com.cloud.host.HostVO;
 import com.cloud.host.Status;
@@ -90,7 +91,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck
         }
         Status hostStatus = Status.Unknown;
         Status neighbourStatus = Status.Unknown;
-        final CheckOnHostCommand cmd = new CheckOnHostCommand(agent);
+        final CheckOnHostCommand cmd = new CheckOnHostCommand(agent, HighAvailabilityManager.KvmHAFenceHostIfHeartbeatFailsOnStorage.value());
         try {
             LOG.debug(String.format("Checking %s status...", agent.toString()));
             Answer answer = agentMgr.easySend(agent.getId(), cmd);
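Note on the hunk above: the management server now reads the value of HighAvailabilityManager.KvmHAFenceHostIfHeartbeatFailsOnStorage and carries it to the agent inside CheckOnHostCommand. The agent-side aggregation of per-storage heartbeat results is not part of this hunk, so the following self-contained sketch only illustrates, under an assumption inferred from the setting's name, the decision the flag enables when several primary storages are monitored at once. The pool names and the helper method are illustrative only, not CloudStack code.

import java.util.Map;

// Illustrative only: models how per-pool heartbeat results could be summarized
// into a single host-level verdict, depending on the new flag.
public class StorageHeartbeatDecisionSketch {

    // reportFailureIfOneStorageIsDown stands in for the boolean carried by CheckOnHostCommand.
    static boolean hostConsideredDown(Map<String, Boolean> poolHeartbeatOk, boolean reportFailureIfOneStorageIsDown) {
        long failed = poolHeartbeatOk.values().stream().filter(ok -> !ok).count();
        if (failed == 0) {
            return false;                        // every monitored storage heartbeat is healthy
        }
        if (reportFailureIfOneStorageIsDown) {
            return true;                         // a single failing storage is enough to report the host check as failed
        }
        return failed == poolHeartbeatOk.size(); // otherwise only an all-storages failure counts (assumption)
    }

    public static void main(String[] args) {
        Map<String, Boolean> checks = Map.of("nfs-primary", true, "storpool-primary", false);
        System.out.println(hostConsideredDown(checks, false)); // false: the NFS heartbeat is still healthy
        System.out.println(hostConsideredDown(checks, true));  // true: one failure is reported
    }
}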
@@ -174,7 +174,7 @@ import com.cloud.agent.properties.AgentPropertiesFileHandler;
 import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource;
 import com.cloud.exception.InternalErrorException;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.hypervisor.kvm.resource.KVMHABase.NfsStoragePool;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ChannelDef;
 import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ClockDef;
 import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ConsoleDef;
@@ -3346,8 +3346,8 @@ public class LibvirtComputingResourceTest {
 
         final KVMHAMonitor monitor = Mockito.mock(KVMHAMonitor.class);
 
-        final NfsStoragePool storagePool = Mockito.mock(NfsStoragePool.class);
-        final List<NfsStoragePool> pools = new ArrayList<NfsStoragePool>();
+        final HAStoragePool storagePool = Mockito.mock(HAStoragePool.class);
+        final List<HAStoragePool> pools = new ArrayList<HAStoragePool>();
         pools.add(storagePool);
 
         when(libvirtComputingResourceMock.getMonitor()).thenReturn(monitor);
@@ -66,8 +66,10 @@ import com.cloud.storage.ResizeVolumePayload;
 import com.cloud.storage.Snapshot;
 import com.cloud.storage.SnapshotVO;
 import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.Volume;
 import com.cloud.storage.VolumeDetailVO;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.SnapshotDao;
@@ -1879,4 +1881,13 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     @Override
     public void provideVmTags(long vmId, long volumeId, String tagValue) {
     }
+
+    @Override
+    public boolean isStorageSupportHA(StoragePoolType type) {
+        return false;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+    }
 }
@@ -552,4 +552,13 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
         }
         return false;
     }
+
+    @Override
+    public boolean isStorageSupportHA(StoragePoolType type) {
+        return StoragePoolType.NetworkFilesystem == type;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+    }
 }
@@ -20,7 +20,10 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.joda.time.Duration;
 
+import com.cloud.agent.api.to.HostTO;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.storage.Storage;
 
 public class LinstorStoragePool implements KVMStoragePool {
@@ -194,4 +197,35 @@ public class LinstorStoragePool implements KVMStoragePool {
     public String getResourceGroup() {
         return _resourceGroup;
     }
+
+    @Override
+    public boolean isPoolSupportHA() {
+        return false;
+    }
+
+    @Override
+    public String getHearthBeatPath() {
+        return null;
+    }
+
+    @Override
+    public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp,
+            boolean hostValidation) {
+        return null;
+    }
+
+    @Override
+    public String getStorageNodeId() {
+        return null;
+    }
+
+    @Override
+    public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
+        return null;
+    }
+
+    @Override
+    public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, String volumeUUIDListString, String vmActivityCheckPath, long duration) {
+        return null;
+    }
 }
@@ -54,9 +54,11 @@ import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.host.Host;
 import com.cloud.storage.ResizeVolumePayload;
 import com.cloud.storage.SnapshotVO;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.Volume;
 import com.cloud.storage.VolumeDetailVO;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.SnapshotDao;
@@ -881,4 +883,13 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
     @Override
     public void provideVmTags(long vmId, long volumeId, String tagValue) {
     }
+
+    @Override
+    public boolean isStorageSupportHA(StoragePoolType type) {
+        return false;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+    }
 }
@@ -49,7 +49,9 @@ import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.DataTO;
 import com.cloud.host.Host;
 import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.user.dao.AccountDao;
@@ -257,4 +259,13 @@ public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     @Override
     public void provideVmTags(long vmId, long volumeId, String tagValue) {
     }
+
+    @Override
+    public boolean isStorageSupportHA(StoragePoolType type) {
+        return false;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+    }
 }
@@ -44,7 +44,9 @@ import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.DataTO;
 import com.cloud.host.Host;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
 import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.utils.Pair;
 import com.cloud.utils.exception.CloudRuntimeException;
@@ -283,4 +285,13 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
     @Override
     public void provideVmTags(long vmId, long volumeId, String tagValue) {
     }
+
+    @Override
+    public boolean isStorageSupportHA(StoragePoolType type) {
+        return false;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+    }
 }
@@ -79,6 +79,7 @@ import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.ResizeVolumePayload;
 import com.cloud.storage.SnapshotVO;
 import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.StoragePoolHostVO;
@@ -1415,4 +1416,13 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         }
         return false;
     }
+
+    @Override
+    public boolean isStorageSupportHA(StoragePoolType type) {
+        return false;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+    }
 }
@@ -1663,4 +1663,13 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     @Override
     public void provideVmTags(long vmId, long volumeId, String tagValue) {
     }
+
+    @Override
+    public boolean isStorageSupportHA(StoragePoolType type) {
+        return false;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+    }
 }
@@ -35,13 +35,15 @@ public class StorPoolModifyStoragePoolAnswer extends Answer{
     private String poolType;
     private List<ModifyStoragePoolAnswer> datastoreClusterChildren = new ArrayList<>();
     private String clusterId;
+    private String clientNodeId;
 
-    public StorPoolModifyStoragePoolAnswer(StorPoolModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map<String, TemplateProp> tInfo, String clusterId) {
+    public StorPoolModifyStoragePoolAnswer(StorPoolModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map<String, TemplateProp> tInfo, String clusterId, String clientNodeId) {
         super(cmd);
         result = true;
         poolInfo = new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes);
         templateInfo = tInfo;
         this.clusterId = clusterId;
+        this.clientNodeId = clientNodeId;
     }
 
     public StorPoolModifyStoragePoolAnswer(String errMsg) {
@@ -91,4 +93,12 @@ public class StorPoolModifyStoragePoolAnswer extends Answer{
     public String getClusterId() {
         return clusterId;
     }
+
+    public String getClientNodeId() {
+        return clientNodeId;
+    }
+
+    public void setClientNodeId(String clientNodeId) {
+        this.clientNodeId = clientNodeId;
+    }
 }
@@ -33,6 +33,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
 import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
 import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
 import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.hypervisor.kvm.storage.StorPoolStoragePool;
 import com.cloud.resource.CommandWrapper;
 import com.cloud.resource.ResourceWrapper;
 import com.cloud.storage.template.TemplateProp;
@@ -47,7 +48,7 @@ public final class StorPoolModifyStorageCommandWrapper extends CommandWrapper<St
 
     @Override
     public Answer execute(final StorPoolModifyStoragePoolCommand command, final LibvirtComputingResource libvirtComputingResource) {
-        String clusterId = getSpClusterId();
+        String clusterId = StorPoolStoragePool.getStorPoolConfigParam("SP_CLUSTER_ID");
         if (clusterId == null) {
             log.debug(String.format("Could not get StorPool cluster id for a command [%s]", command.getClass()));
             return new Answer(command, false, "spNotFound");
@@ -66,40 +67,14 @@ public final class StorPoolModifyStorageCommandWrapper extends CommandWrapper<St
                 return new Answer(command, false, String.format("Failed to create storage pool [%s]", command.getPool().getId()));
             }
 
-            final Map<String, TemplateProp> tInfo = new HashMap<String, TemplateProp>();
-            final StorPoolModifyStoragePoolAnswer answer = new StorPoolModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, clusterId);
-
-            return answer;
+            final Map<String, TemplateProp> tInfo = new HashMap<>();
+            return new StorPoolModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, clusterId, storagepool.getStorageNodeId());
         } catch (Exception e) {
             log.debug(String.format("Could not modify storage due to %s", e.getMessage()));
             return new Answer(command, e);
         }
     }
 
-    private String getSpClusterId() {
-        Script sc = new Script("storpool_confget", 0, log);
-        OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
-
-        String SP_CLUSTER_ID = null;
-        final String err = sc.execute(parser);
-        if (err != null) {
-            StorPoolStorageAdaptor.SP_LOG("Could not execute storpool_confget. Error: %s", err);
-            return SP_CLUSTER_ID;
-        }
-
-        for (String line: parser.getLines().split("\n")) {
-            String[] toks = line.split("=");
-            if( toks.length != 2 ) {
-                continue;
-            }
-            if (toks[0].equals("SP_CLUSTER_ID")) {
-                SP_CLUSTER_ID = toks[1];
-                return SP_CLUSTER_ID;
-            }
-        }
-        return SP_CLUSTER_ID;
-    }
-
     public String attachOrDetachVolume(String command, String type, String volumeUuid) {
         final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(volumeUuid, true);
         if (name == null) {
@@ -126,7 +101,11 @@ public final class StorPoolModifyStorageCommandWrapper extends CommandWrapper<St
             Set<Entry<String, JsonElement>> obj2 = new JsonParser().parse(res).getAsJsonObject().entrySet();
             for (Entry<String, JsonElement> entry : obj2) {
                 if (entry.getKey().equals("error")) {
-                    res = entry.getValue().getAsJsonObject().get("name").getAsString();
+                    JsonElement errName = entry.getValue().getAsJsonObject().get("name");
+                    if (errName != null) {
+                        res = errName.getAsString();
+                        break;
+                    }
                 }
             }
         } catch (Exception e) {
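The wrapper above now delegates to StorPoolStoragePool.getStorPoolConfigParam("SP_CLUSTER_ID") (defined in the next file) instead of its private getSpClusterId() helper. That method shells out to storpool_confget and scans its KEY=VALUE output. The self-contained sketch below mirrors the same lookup against a canned sample rather than the real command; the sample values are made up, while SP_CLUSTER_ID and SP_OURID are the keys the plugin actually reads.

// Self-contained sketch of the KEY=VALUE lookup performed by getStorPoolConfigParam().
public class StorPoolConfGetSketch {

    static String lookup(String confgetOutput, String param) {
        for (String line : confgetOutput.split("\n")) {
            String[] toks = line.split("=");
            if (toks.length != 2) {
                continue;            // skip blanks and malformed lines, as the plugin does
            }
            if (toks[0].equals(param)) {
                return toks[1];
            }
        }
        return null;                 // parameter not present in the configuration
    }

    public static void main(String[] args) {
        String sample = "SP_CLUSTER_ID=example.cluster\nSP_OURID=12\n"; // illustrative values
        System.out.println(lookup(sample, "SP_CLUSTER_ID")); // example.cluster
        System.out.println(lookup(sample, "SP_OURID"));      // 12
    }
}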
@@ -18,13 +18,28 @@ package com.cloud.hypervisor.kvm.storage;
 
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 
 import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+import org.apache.log4j.Logger;
+import org.joda.time.Duration;
 
+import com.cloud.agent.api.to.HostTO;
+import com.cloud.agent.properties.AgentProperties;
+import com.cloud.agent.properties.AgentPropertiesFileHandler;
+import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
 import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.utils.script.OutputInterpreter;
+import com.cloud.utils.script.Script;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonIOException;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.gson.JsonSyntaxException;
 
 public class StorPoolStoragePool implements KVMStoragePool {
+    private static final Logger log = Logger.getLogger(StorPoolStoragePool.class);
     private String _uuid;
     private String _sourceHost;
     private int _sourcePort;
@@ -34,6 +49,7 @@ public class StorPoolStoragePool implements KVMStoragePool {
     private String _authSecret;
     private String _sourceDir;
     private String _localPath;
+    private String storageNodeId = getStorPoolConfigParam("SP_OURID");
 
     public StorPoolStoragePool(String uuid, String host, int port, StoragePoolType storagePoolType, StorageAdaptor storageAdaptor) {
         _uuid = uuid;
@@ -166,4 +182,123 @@ public class StorPoolStoragePool implements KVMStoragePool {
     public Map<String, String> getDetails() {
         return null;
     }
+
+    @Override
+    public boolean isPoolSupportHA() {
+        return true;
+    }
+
+    @Override
+    public String getHearthBeatPath() {
+        String kvmScriptsDir = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_SCRIPTS_DIR);
+        return Script.findScript(kvmScriptsDir, "kvmspheartbeat.sh");
+    }
+
+    @Override
+    public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp, boolean hostValidation) {
+        boolean isStorageNodeUp = checkingHeartBeat(primaryStoragePool, null);
+        if (!isStorageNodeUp && !hostValidation) {
+            //restart the host
+            log.debug(String.format("The host [%s] will be restarted because the health check failed for the storage pool [%s]", hostPrivateIp, primaryStoragePool.getPool().getType()));
+            Script cmd = new Script(primaryStoragePool.getPool().getHearthBeatPath(), HeartBeatUpdateTimeout, log);
+            cmd.add("-c");
+            cmd.execute();
+            return "Down";
+        }
+        return isStorageNodeUp ? null : "Down";
+    }
+
+    @Override
+    public String getStorageNodeId() {
+        return storageNodeId;
+    }
+
+    public static final String getStorPoolConfigParam(String param) {
+        Script sc = new Script("storpool_confget", 0, Logger.getLogger(StorPoolStoragePool.class));
+        OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+
+        String configParam = null;
+        final String err = sc.execute(parser);
+        if (err != null) {
+            StorPoolStorageAdaptor.SP_LOG("Could not execute storpool_confget. Error: %s", err);
+            return configParam;
+        }
+
+        for (String line: parser.getLines().split("\n")) {
+            String[] toks = line.split("=");
+            if( toks.length != 2 ) {
+                continue;
+            }
+            if (toks[0].equals(param)) {
+                configParam = toks[1];
+                return configParam;
+            }
+        }
+        return configParam;
+    }
+
+    @Override
+    public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
+        boolean isNodeWorking = false;
+        OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+
+        String res = executeStorPoolServiceListCmd(parser);
+
+        if (res != null) {
+            return isNodeWorking;
+        }
+        String response = parser.getLines();
+
+        Integer hostStorageNodeId = null;
+        if (host == null) {
+            hostStorageNodeId = Integer.parseInt(storageNodeId);
+        } else {
+            hostStorageNodeId = host.getParent() != null ? Integer.parseInt(host.getParent()) : null;
+        }
+        if (hostStorageNodeId == null) {
+            return isNodeWorking;
+        }
+        try {
+            isNodeWorking = checkIfNodeIsRunning(response, hostStorageNodeId);
+        } catch (JsonIOException | JsonSyntaxException e) {
+            e.printStackTrace();
+        }
+        return isNodeWorking;
+    }
+
+    private boolean checkIfNodeIsRunning(String response, Integer hostStorageNodeId) {
+        boolean isNodeWorking = false;
+        JsonParser jsonParser = new JsonParser();
+        JsonObject stats = (JsonObject) jsonParser.parse(response);
+        JsonObject data = stats.getAsJsonObject("data");
+        if (data != null) {
+            JsonObject clients = data.getAsJsonObject("clients");
+            for (Entry<String, JsonElement> element : clients.entrySet()) {
+                String storageNodeStatus = element.getValue().getAsJsonObject().get("status").getAsString();
+                int nodeId = element.getValue().getAsJsonObject().get("nodeId").getAsInt();
+                if (hostStorageNodeId == nodeId) {
+                    if (storageNodeStatus.equals("running")) {
+                        return true;
+                    } else {
+                        return isNodeWorking;
+                    }
+                }
+            }
+        }
+        return isNodeWorking;
+    }
+
+    private String executeStorPoolServiceListCmd(OutputInterpreter.AllLinesParser parser) {
+        Script sc = new Script("storpool", 0, log);
+        sc.add("-j");
+        sc.add("service");
+        sc.add("list");
+        String res = sc.execute(parser);
+        return res;
+    }
+
+    @Override
+    public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, String volumeUuidListString, String vmActivityCheckPath, long duration) {
+        return checkingHeartBeat(pool, host);
+    }
 }
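For context on checkingHeartBeat() above: it runs `storpool -j service list` and walks data.clients.*.nodeId / status in the JSON reply, treating only a matching client with status "running" as healthy. The full reply format is not part of this diff, so the sketch below assumes a trimmed-down shape containing just those fields and reproduces the same Gson traversal in isolation.

import java.util.Map.Entry;

import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

// Standalone version of checkIfNodeIsRunning(); the JSON literal is an assumed,
// minimal `storpool -j service list` reply with only the fields the plugin reads.
public class StorPoolServiceListSketch {

    static boolean isClientRunning(String json, int hostStorageNodeId) {
        JsonObject stats = (JsonObject) new JsonParser().parse(json);
        JsonObject data = stats.getAsJsonObject("data");
        if (data == null) {
            return false;
        }
        JsonObject clients = data.getAsJsonObject("clients");
        for (Entry<String, JsonElement> element : clients.entrySet()) {
            JsonObject client = element.getValue().getAsJsonObject();
            if (client.get("nodeId").getAsInt() == hostStorageNodeId) {
                return "running".equals(client.get("status").getAsString());
            }
        }
        return false; // the host's client id was not found in the service list
    }

    public static void main(String[] args) {
        String sample = "{\"data\":{\"clients\":{\"12\":{\"nodeId\":12,\"status\":\"running\"},"
                + "\"13\":{\"nodeId\":13,\"status\":\"down\"}}}}";
        System.out.println(isClientRunning(sample, 12)); // true
        System.out.println(isClientRunning(sample, 13)); // false
    }
}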
@@ -92,6 +92,7 @@ import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VMTemplateDetailVO;
 import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.Volume;
 import com.cloud.storage.VolumeDetailVO;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.SnapshotDetailsDao;
@@ -1164,4 +1165,20 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             }
         }
     }
+
+    @Override
+    public boolean isStorageSupportHA(StoragePoolType type) {
+        return true;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+        StoragePoolVO poolVO = primaryStoreDao.findById(volume.getPoolId());
+        if (poolVO != null) {
+            SpConnectionDesc conn = StorPoolUtil.getSpConnection(poolVO.getUuid(), poolVO.getId(), storagePoolDetailsDao, primaryStoreDao);
+            String volName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
+            SpApiResponse resp = StorPoolUtil.detachAllForced(volName, false, conn);
+            StorPoolUtil.spLog("The volume [%s] is detach from all clusters [%s]", volName, resp);
+        }
+    }
 }
@@ -37,6 +37,8 @@ import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
 import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
 import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
 import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
+import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
+import org.apache.commons.collections4.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
 
@@ -47,6 +49,7 @@ import com.cloud.agent.api.storage.StorPoolModifyStoragePoolCommand;
 import com.cloud.agent.manager.AgentAttache;
 import com.cloud.alert.AlertManager;
 import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
 import com.cloud.dc.dao.ClusterDao;
 import com.cloud.exception.StorageConflictException;
 import com.cloud.host.HostVO;
@@ -162,6 +165,11 @@ public class StorPoolHostListener implements HypervisorHostListener {
             poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
         }
 
+        if (host.getParent() == null && mspAnswer.getClientNodeId() != null) {
+            host.setParent(mspAnswer.getClientNodeId());
+            hostDao.update(host.getId(), host);
+        }
+
         StorPoolHelper.setSpClusterIdIfNeeded(hostId, mspAnswer.getClusterId(), clusterDao, hostDao, clusterDetailsDao);
 
         StorPoolUtil.spLog("Connection established between storage pool [%s] and host [%s]", poolVO.getName(), host.getName());
@@ -214,10 +222,23 @@ public class StorPoolHostListener implements HypervisorHostListener {
     }
 
     @Override
-    public boolean hostRemoved(long hostId, long clusterId) {
+    public synchronized boolean hostRemoved(long hostId, long clusterId) {
+        List<HostVO> hosts = hostDao.findByClusterId(clusterId);
+        if (CollectionUtils.isNotEmpty(hosts) && hosts.size() == 1) {
+            removeSPClusterIdWhenTheLastHostIsRemoved(clusterId);
+        }
         return true;
     }
 
+    private void removeSPClusterIdWhenTheLastHostIsRemoved(long clusterId) {
+        ClusterDetailsVO clusterDetailsVo = clusterDetailsDao.findDetail(clusterId,
+                StorPoolConfigurationManager.StorPoolClusterId.key());
+        if (clusterDetailsVo != null && (clusterDetailsVo.getValue() != null && !clusterDetailsVo.getValue().equals(StorPoolConfigurationManager.StorPoolClusterId.defaultValue())) ){
+            clusterDetailsVo.setValue(StorPoolConfigurationManager.StorPoolClusterId.defaultValue());
+            clusterDetailsDao.update(clusterDetailsVo.getId(), clusterDetailsVo);
+        }
+    }
+
     //workaround: we need this "hack" to add our command StorPoolModifyStoragePoolCommand in AgentAttache.s_commandsAllowedInMaintenanceMode
     //which checks the allowed commands when the host is in maintenance mode
     private void addModifyCommandToCommandsAllowedInMaintenanceMode() {
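Read together with the StorPoolStoragePool and StorPoolModifyStoragePoolAnswer hunks above, this listener closes the loop: the agent reports its StorPool client id (SP_OURID) as clientNodeId, the listener caches it in the host's parent field, and checkingHeartBeat() later parses HostTO.getParent() back into the node id it looks up in the service list. Below is a condensed, self-contained sketch of that propagation, with a plain object standing in for the real VO/DAO classes.

// Illustrative only: models the SP_OURID -> answer.clientNodeId -> host.parent -> heartbeat-check
// propagation added in this change; not CloudStack code.
public class ClientNodeIdPropagationSketch {

    static class Host {          // stands in for HostVO / HostTO
        String parent;           // the StorPool client node id is cached here
    }

    public static void main(String[] args) {
        String clientNodeIdFromAgent = "12"; // what getClientNodeId() would carry back from the agent

        // StorPoolHostListener#hostConnect(): cache the id once, on first connect
        Host host = new Host();
        if (host.parent == null && clientNodeIdFromAgent != null) {
            host.parent = clientNodeIdFromAgent;
        }

        // StorPoolStoragePool#checkingHeartBeat(): turn the cached value back into a node id
        Integer hostStorageNodeId = host.parent != null ? Integer.parseInt(host.parent) : null;
        System.out.println("node id used for the storpool service list lookup: " + hostStorageNodeId);
    }
}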
scripts/vm/hypervisor/kvm/kvmspheartbeat.sh (new executable file, 66 lines)
@@ -0,0 +1,66 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+help() {
+  printf "Usage: $0
+    -c cleanup"
+  exit 1
+}
+#set -x
+cflag=0
+
+while getopts 'c' OPTION
+do
+  case $OPTION in
+  c)
+     cflag=1
+     ;;
+  *)
+     help
+     ;;
+  esac
+done
+
+
+#delete VMs on this mountpoint
+deleteVMs() {
+  vmPids=$(ps aux| grep qemu | grep 'storpool-byid' | awk '{print $2}' 2> /dev/null)
+  if [ $? -gt 0 ]
+  then
+     return
+  fi
+
+  if [ -z "$vmPids" ]
+  then
+     return
+  fi
+
+  for pid in $vmPids
+  do
+     kill -9 $pid &> /dev/null
+  done
+}
+
+if [ "$cflag" == "1" ]
+then
+  /usr/bin/logger -t heartbeat "kvmspheartbeat.sh will reboot system because it was unable to write the heartbeat to the storage."
+  sync &
+  sleep 5
+  echo b > /proc/sysrq-trigger
+  exit $?
+fi
@@ -29,6 +29,10 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
 import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
 import org.apache.cloudstack.framework.config.ConfigKey;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
@@ -67,8 +71,11 @@ import com.cloud.server.ManagementServer;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.storage.StorageManager;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.dao.GuestOSCategoryDao;
 import com.cloud.storage.dao.GuestOSDao;
+import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.secondary.SecondaryStorageVmManager;
 import com.cloud.user.AccountManager;
 import com.cloud.utils.component.ManagerBase;
@@ -133,6 +140,10 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur
     private ConsoleProxyManager consoleProxyManager;
     @Inject
     private SecondaryStorageVmManager secondaryStorageVmManager;
+    @Inject
+    VolumeDao volumeDao;
+    @Inject
+    DataStoreProviderManager dataStoreProviderMgr;
 
     long _serverId;
 
@@ -314,6 +325,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur
     }
 
     protected void wakeupWorkers() {
+        s_logger.debug("Wakeup workers HA");
         for (WorkerThread worker : _workers) {
             worker.wakup();
         }
@@ -332,6 +344,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur
 
     @Override
     public void scheduleRestart(VMInstanceVO vm, boolean investigate) {
+        s_logger.debug("HA schedule restart");
         Long hostId = vm.getHostId();
         if (hostId == null) {
             try {
@@ -425,6 +438,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur
     }
 
     protected Long restart(final HaWorkVO work) {
+        s_logger.debug("RESTART with HAWORK");
         List<HaWorkVO> items = _haDao.listFutureHaWorkForVm(work.getInstanceId(), work.getId());
         if (items.size() > 0) {
             StringBuilder str = new StringBuilder("Cancelling this work item because newer ones have been scheduled. Work Ids = [");
@@ -599,6 +613,20 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur
         }
 
         try{
+            if (HypervisorType.KVM == host.getHypervisorType()) {
+                List<VolumeVO> volumes = volumeDao.findByInstance(vmId);
+                for (VolumeVO volumeVO : volumes) {
+                    //detach the volumes from all clusters before starting the VM on another host.
+                    if (volumeVO.getPoolType() == StoragePoolType.StorPool) {
+                        DataStoreProvider storeProvider = dataStoreProviderMgr.getDataStoreProvider(volumeVO.getPoolType().name());
+                        DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
+                        if (storeDriver instanceof PrimaryDataStoreDriver) {
+                            PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver;
+                            primaryStoreDriver.detachVolumeFromAllStorageNodes(volumeVO);
+                        }
+                    }
+                }
+            }
             // First try starting the vm with its original planner, if it doesn't succeed send HAPlanner as its an emergency.
             _itMgr.advanceStart(vm.getUuid(), params, null);
         }catch (InsufficientCapacityException e){
@@ -1064,6 +1092,6 @@ public ConfigKey<?>[] getConfigKeys() {
     public ConfigKey<?>[] getConfigKeys() {
         return new ConfigKey[] {TimeBetweenCleanup, MigrationMaxRetries, TimeToSleep, TimeBetweenFailures,
                 StopRetryInterval, RestartRetryInterval, MigrateRetryInterval, InvestigateRetryInterval,
-                HAWorkers, ForceHA};
+                HAWorkers, ForceHA, KvmHAFenceHostIfHeartbeatFailsOnStorage};
     }
 }
@@ -82,6 +82,7 @@ public class KVMFencer extends AdapterBase implements FenceBuilder {
 
         List<HostVO> hosts = _resourceMgr.listAllHostsInCluster(host.getClusterId());
         FenceCommand fence = new FenceCommand(vm, host);
+        fence.setReportCheckFailureIfOneStorageIsDown(HighAvailabilityManager.KvmHAFenceHostIfHeartbeatFailsOnStorage.value());
 
         int i = 0;
         for (HostVO h : hosts) {
@@ -16,6 +16,33 @@
 // under the License.
 package com.cloud.ha;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.managed.context.ManagedContext;
+import org.apache.log4j.Logger;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.alert.AlertManager;
 import com.cloud.consoleproxy.ConsoleProxyManager;
@@ -39,36 +66,13 @@ import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.dao.GuestOSCategoryDao;
 import com.cloud.storage.dao.GuestOSDao;
+import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.secondary.SecondaryStorageVmManager;
 import com.cloud.user.AccountManager;
 import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineManager;
 import com.cloud.vm.dao.VMInstanceDao;
-import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
-import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
-import org.apache.cloudstack.managed.context.ManagedContext;
-import org.apache.log4j.Logger;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import javax.inject.Inject;
-import java.lang.reflect.Array;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 
 @RunWith(MockitoJUnitRunner.class)
 public class HighAvailabilityManagerImplTest {
@@ -117,6 +121,10 @@ public class HighAvailabilityManagerImplTest {
     SecondaryStorageVmManager secondaryStorageVmManager;
     @Mock
     HostVO hostVO;
+    @Mock
+    VolumeDao volumeDao;
+    @Mock
+    DataStoreProviderManager dataStoreProviderMgr;
 
     HighAvailabilityManagerImpl highAvailabilityManager;
     HighAvailabilityManagerImpl highAvailabilityManagerSpy;