Mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)
CLOUDSTACK-8813: Notify listeners when a host has been added to a cluster, is about to be removed from a cluster, or has been removed from a cluster
This commit is contained in: commit dad9e5d868 (parent bee2bdc299)
@@ -19,10 +19,10 @@
 
 package com.cloud.agent.api;
 
-import com.cloud.storage.StoragePool;
-
 import java.util.Map;
 
+import com.cloud.storage.StoragePool;
+
 public class CreateStoragePoolCommand extends ModifyStoragePoolCommand {
     public static final String DATASTORE_NAME = "datastoreName";
     public static final String IQN = "iqn";
@@ -32,9 +32,6 @@ public class CreateStoragePoolCommand extends ModifyStoragePoolCommand {
     private boolean _createDatastore;
     private Map<String, String> _details;
 
-    public CreateStoragePoolCommand() {
-    }
-
     public CreateStoragePoolCommand(boolean add, StoragePool pool) {
         super(add, pool);
     }
@@ -24,44 +24,41 @@ import java.util.Map;
 import com.cloud.storage.template.TemplateProp;
 
 public class ModifyStoragePoolAnswer extends Answer {
-    StoragePoolInfo poolInfo;
-    Map<String, TemplateProp> templateInfo;
-    String localDatastoreName = null;
-
-    protected ModifyStoragePoolAnswer() {
-    }
+    private StoragePoolInfo _poolInfo;
+    private Map<String, TemplateProp> _templateInfo;
+    private String _localDatastoreName;
 
     public ModifyStoragePoolAnswer(ModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map<String, TemplateProp> tInfo) {
         super(cmd);
-        this.result = true;
-        this.poolInfo =
-            new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes);
 
-        this.templateInfo = tInfo;
-    }
+        result = true;
 
-    public StoragePoolInfo getPoolInfo() {
-        return poolInfo;
+        _poolInfo = new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes);
+
+        _templateInfo = tInfo;
     }
 
     public void setPoolInfo(StoragePoolInfo poolInfo) {
-        this.poolInfo = poolInfo;
+        _poolInfo = poolInfo;
     }
 
-    public Map<String, TemplateProp> getTemplateInfo() {
-        return templateInfo;
+    public StoragePoolInfo getPoolInfo() {
+        return _poolInfo;
     }
 
     public void setTemplateInfo(Map<String, TemplateProp> templateInfo) {
-        this.templateInfo = templateInfo;
+        _templateInfo = templateInfo;
     }
 
-    public String getLocalDatastoreName() {
-        return localDatastoreName;
+    public Map<String, TemplateProp> getTemplateInfo() {
+        return _templateInfo;
     }
 
     public void setLocalDatastoreName(String localDatastoreName) {
-        this.localDatastoreName = localDatastoreName;
+        _localDatastoreName = localDatastoreName;
+    }
+
+    public String getLocalDatastoreName() {
+        return _localDatastoreName;
     }
 }
@@ -26,51 +26,49 @@ import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.storage.StoragePool;
 
 public class ModifyStoragePoolCommand extends Command {
 
-    boolean add;
-    StorageFilerTO pool;
-    String localPath;
-    String[] options;
     public static final String LOCAL_PATH_PREFIX = "/mnt/";
 
-    public ModifyStoragePoolCommand() {
-
-    }
+    private boolean _add;
+    private StorageFilerTO _pool;
+    private String _localPath;
+    private String _storagePath;
 
     public ModifyStoragePoolCommand(boolean add, StoragePool pool, String localPath) {
-        this.add = add;
-        this.pool = new StorageFilerTO(pool);
-        this.localPath = localPath;
+        _add = add;
+        _pool = new StorageFilerTO(pool);
+        _localPath = localPath;
     }
 
     public ModifyStoragePoolCommand(boolean add, StoragePool pool) {
         this(add, pool, LOCAL_PATH_PREFIX + File.separator + UUID.nameUUIDFromBytes((pool.getHostAddress() + pool.getPath()).getBytes()));
     }
 
-    public StorageFilerTO getPool() {
-        return pool;
+    public boolean getAdd() {
+        return _add;
     }
 
     public void setPool(StoragePool pool) {
-        this.pool = new StorageFilerTO(pool);
+        _pool = new StorageFilerTO(pool);
     }
 
-    public boolean getAdd() {
-        return add;
+    public StorageFilerTO getPool() {
+        return _pool;
+    }
+
+    public String getLocalPath() {
+        return _localPath;
+    }
+
+    public void setStoragePath(String storagePath) {
+        _storagePath = storagePath;
+    }
+
+    public String getStoragePath() {
+        return _storagePath;
     }
 
     @Override
     public boolean executeInSequence() {
         return false;
     }
 
-    public String getLocalPath() {
-        return localPath;
-    }
-
-    public void setOptions(String[] options) {
-        this.options = options;
-    }
-
 }
core/src/com/cloud/agent/api/ModifyTargetsAnswer.java (new file, 23 lines)
@@ -0,0 +1,23 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+public class ModifyTargetsAnswer extends Answer {
+}
core/src/com/cloud/agent/api/ModifyTargetsCommand.java (new file, 57 lines)
@@ -0,0 +1,57 @@
+// [standard ASF license header, identical to ModifyTargetsAnswer.java above]
+
+package com.cloud.agent.api;
+
+import java.util.List;
+import java.util.Map;
+
+public class ModifyTargetsCommand extends Command {
+    public static final String IQN = "iqn";
+    public static final String STORAGE_HOST = "storageHost";
+    public static final String STORAGE_PORT = "storagePort";
+    public static final String CHAP_NAME = "chapName";
+    public static final String CHAP_SECRET = "chapSecret";
+    public static final String MUTUAL_CHAP_NAME = "mutualChapName";
+    public static final String MUTUAL_CHAP_SECRET = "mutualChapSecret";
+
+    private boolean _add;
+    private List<Map<String, String>> _targets;
+
+    public void setAdd(boolean add) {
+        _add = add;
+    }
+
+    public boolean getAdd() {
+        return _add;
+    }
+
+    public void setTargets(List<Map<String, String>> targets) {
+        _targets = targets;
+    }
+
+    public List<Map<String, String>> getTargets() {
+        return _targets;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+}
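
Note: the following is a minimal usage sketch, not part of the commit. It shows how a caller might populate the new ModifyTargetsCommand using the key constants defined above; the class name ModifyTargetsExample and the literal values are hypothetical.

// Hypothetical sketch: build a ModifyTargetsCommand that asks a host to add one iSCSI target.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.cloud.agent.api.ModifyTargetsCommand;

public class ModifyTargetsExample {
    public static ModifyTargetsCommand buildAddTargetCommand(String storageHost, int storagePort, String iqn) {
        // Each target is described by a map keyed with the constants on the command class.
        Map<String, String> target = new HashMap<>();

        target.put(ModifyTargetsCommand.STORAGE_HOST, storageHost);
        target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePort));
        target.put(ModifyTargetsCommand.IQN, iqn);

        List<Map<String, String>> targets = new ArrayList<>();
        targets.add(target);

        ModifyTargetsCommand cmd = new ModifyTargetsCommand();
        cmd.setAdd(true);      // true = add the targets, false = remove them
        cmd.setTargets(targets);

        return cmd;
    }
}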
@@ -18,6 +18,8 @@
  */
 package org.apache.cloudstack.engine.subsystem.api.storage;
 
+import java.util.List;
+
 import com.cloud.storage.DataStoreProviderApiService;
 import com.cloud.utils.component.Manager;
 
@@ -29,4 +31,6 @@ public interface DataStoreProviderManager extends Manager, DataStoreProviderApiS
     DataStoreProvider getDefaultImageDataStoreProvider();
 
     DataStoreProvider getDefaultCacheDataStoreProvider();
+
+    List<DataStoreProvider> getProviders();
 }
@@ -21,7 +21,13 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
 import com.cloud.exception.StorageConflictException;
 
 public interface HypervisorHostListener {
+    boolean hostAdded(long hostId);
+
     boolean hostConnect(long hostId, long poolId) throws StorageConflictException;
 
     boolean hostDisconnected(long hostId, long poolId);
+
+    boolean hostAboutToBeRemoved(long hostId);
+
+    boolean hostRemoved(long hostId, long clusterId);
 }
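
Note: a rough skeleton of the enlarged HypervisorHostListener contract, for orientation only. The class name is hypothetical; real implementations (for example DefaultHostListener later in this commit) put storage-specific work in these callbacks.

// Hypothetical no-op implementation of the expanded HypervisorHostListener interface.
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;

import com.cloud.exception.StorageConflictException;

public class NoOpHypervisorHostListener implements HypervisorHostListener {
    @Override
    public boolean hostAdded(long hostId) {
        // New in this commit: called once when a host has been added to a cluster.
        return true;
    }

    @Override
    public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
        return true;
    }

    @Override
    public boolean hostDisconnected(long hostId, long poolId) {
        return true;
    }

    @Override
    public boolean hostAboutToBeRemoved(long hostId) {
        // New in this commit: last chance to clean up (e.g. detach iSCSI targets) before removal.
        return true;
    }

    @Override
    public boolean hostRemoved(long hostId, long clusterId) {
        // New in this commit: the host is gone; clusterId identifies the cluster it left.
        return true;
    }
}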
@@ -26,27 +26,27 @@ import com.cloud.storage.StoragePool;
 import com.cloud.storage.Volume;
 
 public interface PrimaryDataStoreDriver extends DataStoreDriver {
-    public ChapInfo getChapInfo(VolumeInfo volumeInfo);
+    ChapInfo getChapInfo(VolumeInfo volumeInfo);
 
-    public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore);
+    boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore);
 
-    public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore);
+    void revokeAccess(DataObject dataObject, Host host, DataStore dataStore);
 
     // intended for managed storage (cloud.storage_pool.managed = true)
     // if not managed, return volume.getSize()
-    public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool storagePool);
+    long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool storagePool);
 
     // intended for managed storage (cloud.storage_pool.managed = true)
     // if managed storage, return the total number of bytes currently in use for the storage pool in question
     // if not managed storage, return 0
-    public long getUsedBytes(StoragePool storagePool);
+    long getUsedBytes(StoragePool storagePool);
 
     // intended for managed storage (cloud.storage_pool.managed = true)
     // if managed storage, return the total number of IOPS currently in use for the storage pool in question
     // if not managed storage, return 0
-    public long getUsedIops(StoragePool storagePool);
+    long getUsedIops(StoragePool storagePool);
 
-    public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback);
+    void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback);
 
-    public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback);
+    void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback);
 }
@@ -42,7 +42,7 @@ public interface AgentManager {
         Add, Del, Contains,
     }
 
-    boolean handleDirectConnectAgent(Host host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance) throws ConnectionException;
+    boolean handleDirectConnectAgent(Host host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance, boolean newHost) throws ConnectionException;
 
     /**
      * easy send method that returns null if there's any errors. It handles all exceptions.
@@ -131,8 +131,6 @@ public interface AgentManager {
 
     Answer sendTo(Long dcId, HypervisorType type, Command cmd);
 
-    // public AgentAttache handleDirectConnectAgent(HostVO host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance) throws ConnectionException;
-
     public boolean agentStatusTransitTo(HostVO host, Status.Event e, long msId);
 
     boolean isAgentAttached(long hostId);
@@ -146,4 +144,10 @@ public interface AgentManager {
     boolean reconnect(long hostId);
 
     void rescan();
+
+    void notifyMonitorsOfNewlyAddedHost(long hostId);
+
+    void notifyMonitorsOfHostAboutToBeRemoved(long hostId);
+
+    void notifyMonitorsOfRemovedHost(long hostId, long clusterId);
 }
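
Note: a hedged sketch of how a component that deletes hosts could drive the new notification hooks around the actual removal. It is not code from this commit; the class name is hypothetical, the AgentManager package (com.cloud.agent) is assumed, and the removal step itself is elided.

// Hypothetical caller of the new AgentManager notification hooks.
public class HostRemovalExample {
    private final com.cloud.agent.AgentManager _agentMgr;

    public HostRemovalExample(com.cloud.agent.AgentManager agentMgr) {
        _agentMgr = agentMgr;
    }

    public void removeHost(long hostId, long clusterId) {
        // Let listeners clean up while the host record still exists.
        _agentMgr.notifyMonitorsOfHostAboutToBeRemoved(hostId);

        // ... perform the actual host removal here (omitted) ...

        // Tell listeners the host is gone, including which cluster it left.
        _agentMgr.notifyMonitorsOfRemovedHost(hostId, clusterId);
    }
}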
@@ -63,6 +63,12 @@ public interface Listener {
      */
     AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd);
 
+    /**
+     * This method is called by AgentManager when a host is added to a cluster.
+     * @param long the ID of the newly added host
+     */
+    void processHostAdded(long hostId);
+
     /**
      * This method is called by AgentManager when an agent made a
      * connection to this server if the listener has
@@ -86,6 +92,18 @@ public interface Listener {
      */
     boolean processDisconnect(long agentId, Status state);
 
+    /**
+     * This method is called by AgentManager when a host is about to be removed from a cluster.
+     * @param long the ID of the host that's about to be removed
+     */
+    void processHostAboutToBeRemoved(long hostId);
+
+    /**
+     * This method is called by AgentManager when a host is removed from a cluster.
+     * @param long the ID of the newly removed host
+     */
+    void processHostRemoved(long hostId, long clusterId);
+
     /**
      * If this Listener is passed to the send() method, this method
      * is called by AgentManager after processing an answer
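
Note: because Listener gains three methods, every existing implementation touched by this commit supplies empty bodies. The sketch below is a hypothetical adapter (not part of the commit) that captures that pattern once, assuming the interface lives at com.cloud.agent.Listener; concrete listeners would still implement the remaining Listener methods.

// Hypothetical adapter: default no-op bodies for the new host-lifecycle callbacks.
public abstract class AbstractHostLifecycleListener implements com.cloud.agent.Listener {
    @Override
    public void processHostAdded(long hostId) {
        // no-op by default
    }

    @Override
    public void processHostAboutToBeRemoved(long hostId) {
        // no-op by default
    }

    @Override
    public void processHostRemoved(long hostId, long clusterId) {
        // no-op by default
    }
}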
@@ -44,6 +44,7 @@ import com.cloud.agent.api.CleanupNetworkRulesCmd;
 import com.cloud.agent.api.Command;
 import com.cloud.agent.api.MaintainCommand;
 import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.ModifyTargetsCommand;
 import com.cloud.agent.api.PingTestCommand;
 import com.cloud.agent.api.PvlanSetupCommand;
 import com.cloud.agent.api.ReadyCommand;
@@ -109,11 +110,12 @@ public abstract class AgentAttache {
 
     protected AgentManagerImpl _agentMgr;
 
-    public final static String[] s_commandsAllowedInMaintenanceMode = new String[] {MaintainCommand.class.toString(), MigrateCommand.class.toString(),
+    public final static String[] s_commandsAllowedInMaintenanceMode = new String[] { MaintainCommand.class.toString(), MigrateCommand.class.toString(),
         StopCommand.class.toString(), CheckVirtualMachineCommand.class.toString(), PingTestCommand.class.toString(), CheckHealthCommand.class.toString(),
         ReadyCommand.class.toString(), ShutdownCommand.class.toString(), SetupCommand.class.toString(),
-        CleanupNetworkRulesCmd.class.toString(), CheckNetworkCommand.class.toString(), PvlanSetupCommand.class.toString(), CheckOnHostCommand.class.toString()};
-    protected final static String[] s_commandsNotAllowedInConnectingMode = new String[] {StartCommand.class.toString(), CreateCommand.class.toString()};
+        CleanupNetworkRulesCmd.class.toString(), CheckNetworkCommand.class.toString(), PvlanSetupCommand.class.toString(), CheckOnHostCommand.class.toString(),
+        ModifyTargetsCommand.class.toString() };
+    protected final static String[] s_commandsNotAllowedInConnectingMode = new String[] { StartCommand.class.toString(), CreateCommand.class.toString() };
     static {
         Arrays.sort(s_commandsAllowedInMaintenanceMode);
         Arrays.sort(s_commandsNotAllowedInConnectingMode);
@@ -538,6 +538,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         }
     }
 
+    @Override
+    public void notifyMonitorsOfNewlyAddedHost(long hostId) {
+        for (final Pair<Integer, Listener> monitor : _hostMonitors) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName());
+            }
+
+            monitor.second().processHostAdded(hostId);
+        }
+    }
+
     protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, final StartupCommand[] cmd, final boolean forRebalance) throws ConnectionException {
         final long hostId = attache.getId();
         final HostVO host = _hostDao.findById(hostId);
@@ -1001,6 +1012,28 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         return true;
     }
 
+    @Override
+    public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) {
+        for (final Pair<Integer, Listener> monitor : _hostMonitors) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName());
+            }
+
+            monitor.second().processHostAboutToBeRemoved(hostId);
+        }
+    }
+
+    @Override
+    public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) {
+        for (final Pair<Integer, Listener> monitor : _hostMonitors) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName());
+            }
+
+            monitor.second().processHostRemoved(hostId, clusterId);
+        }
+    }
+
     public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException {
         if (event == Event.AgentDisconnected) {
             if (s_logger.isDebugEnabled()) {
@@ -1464,7 +1497,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     }
 
     @Override
-    public boolean handleDirectConnectAgent(final Host host, final StartupCommand[] cmds, final ServerResource resource, final boolean forRebalance) throws ConnectionException {
+    public boolean handleDirectConnectAgent(final Host host, final StartupCommand[] cmds, final ServerResource resource,
+            final boolean forRebalance, boolean newHost) throws ConnectionException {
         AgentAttache attache;
 
         attache = createAttacheForDirectConnect(host, resource);
@@ -1473,6 +1507,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             answers[i] = new StartupAnswer(cmds[i], attache.getId(), PingInterval.value());
         }
         attache.process(answers);
+
+        if (newHost) {
+            notifyMonitorsOfNewlyAddedHost(host.getId());
+        }
+
         attache = notifyMonitorsOfConnection(attache, cmds, forRebalance);
 
         return attache != null;
@@ -1617,6 +1656,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(final Host host, final StartupCommand cmd, final boolean forRebalance) {
         if (host.getType().equals(Host.Type.TrafficMonitor) || host.getType().equals(Host.Type.SecondaryStorage)) {
@@ -1633,6 +1676,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(final long agentId, final long seq) {
         return true;
@@ -78,6 +78,18 @@ public class SynchronousListener implements Listener {
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) {
     }
@@ -2786,6 +2786,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(final Host agent, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException {
         if (!(cmd instanceof StartupRoutingCommand)) {
@@ -3001,6 +3001,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         return null;
     }
 
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(final Host host, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException {
         if (!(cmd instanceof StartupRoutingCommand)) {
@@ -3088,6 +3091,14 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         return false;
@@ -64,6 +64,8 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
      */
     List<StoragePoolVO> findPoolByName(String name);
 
+    List<StoragePoolVO> findPoolsByProvider(String provider);
+
     /**
      * Find pools by the pod that matches the details.
     *
@@ -79,6 +79,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
         AllFieldSearch.and("path", AllFieldSearch.entity().getPath(), SearchCriteria.Op.EQ);
         AllFieldSearch.and("podId", AllFieldSearch.entity().getPodId(), Op.EQ);
         AllFieldSearch.and("clusterId", AllFieldSearch.entity().getClusterId(), Op.EQ);
+        AllFieldSearch.and("storage_provider_name", AllFieldSearch.entity().getStorageProviderName(), Op.EQ);
         AllFieldSearch.done();
 
         DcPodSearch = createSearchBuilder();
@@ -128,6 +129,13 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
         return listIncludingRemovedBy(sc);
     }
 
+    @Override
+    public List<StoragePoolVO> findPoolsByProvider(String provider) {
+        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
+        sc.setParameters("storage_provider_name", provider);
+
+        return listBy(sc);
+    }
+
     @Override
     public StoragePoolVO findPoolByUUID(String uuid) {
         SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
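
Note: a short, hypothetical usage sketch of the new DAO query (not part of the commit). The import paths for the DAO and VO types are assumed, and the provider name is whatever a storage plug-in registers under.

// Hypothetical consumer of the new findPoolsByProvider() query.
import java.util.List;

import javax.inject.Inject;

import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;

public class ProviderPoolLookupExample {
    @Inject
    private PrimaryDataStoreDao _storagePoolDao;

    public List<StoragePoolVO> poolsForProvider(String providerName) {
        // Returns every primary storage pool whose storage_provider_name column matches.
        return _storagePoolDao.findPoolsByProvider(providerName);
    }
}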
@@ -278,9 +278,16 @@ public class DirectAgentManagerSimpleImpl extends ManagerBase implements AgentMa
     }
 
     @Override
-    public boolean handleDirectConnectAgent(Host host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance) throws ConnectionException {
+    public boolean handleDirectConnectAgent(Host host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance, boolean newHost) throws ConnectionException {
         // TODO Auto-generated method stub
         return false;
     }
+
+    @Override
+    public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) {
+    }
 }
@@ -160,6 +160,10 @@ public class RemoteHostEndPoint implements EndPoint {
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
         // TODO Auto-generated method stub
@@ -172,6 +176,14 @@ public class RemoteHostEndPoint implements EndPoint {
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         // TODO Auto-generated method stub
@@ -218,6 +218,7 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
         this.imageStoreProviderMgr = imageDataStoreProviderMgr;
     }
 
+    @Override
     public List<DataStoreProvider> getProviders() {
         return providers;
     }
@@ -54,6 +54,11 @@ public class DefaultHostListener implements HypervisorHostListener {
     @Inject
     PrimaryDataStoreDao primaryStoreDao;
 
+    @Override
+    public boolean hostAdded(long hostId) {
+        return true;
+    }
+
     @Override
     public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
         StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
@@ -109,4 +114,13 @@ public class DefaultHostListener implements HypervisorHostListener {
         return false;
     }
 
+    @Override
+    public boolean hostAboutToBeRemoved(long hostId) {
+        return true;
+    }
+
+    @Override
+    public boolean hostRemoved(long hostId, long clusterId) {
+        return true;
+    }
 }
@@ -106,6 +106,10 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public final void processConnect(final Host agent, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException {
         // Limit the commands we can process
@@ -176,6 +180,14 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public final boolean isRecurring() {
         return false;
@@ -338,6 +338,10 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer,
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     /* for reconnecting */
     @Override
     public void processConnect(Host host, StartupCommand cmd,
@@ -351,6 +355,14 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer,
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         return false;
@@ -245,6 +245,10 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
 
@@ -273,6 +277,14 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         return false;
@@ -140,6 +140,10 @@ public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer imp
         return false;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
 
@@ -155,6 +159,14 @@ public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer imp
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(long agentId, long seq) {
         return false;
@@ -841,6 +841,10 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) {
         if (cmd instanceof StartupCommand) {
@@ -882,6 +886,14 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         return false;
@@ -148,6 +148,8 @@ import com.cloud.agent.api.MigrateWithStorageCommand;
 import com.cloud.agent.api.ModifySshKeysCommand;
 import com.cloud.agent.api.ModifyStoragePoolAnswer;
 import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.ModifyTargetsAnswer;
+import com.cloud.agent.api.ModifyTargetsCommand;
 import com.cloud.agent.api.NetworkUsageAnswer;
 import com.cloud.agent.api.NetworkUsageCommand;
 import com.cloud.agent.api.PingCommand;
@@ -409,6 +411,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 answer = execute((DestroyCommand)cmd);
             } else if (clz == CreateStoragePoolCommand.class) {
                 return execute((CreateStoragePoolCommand)cmd);
+            } else if (clz == ModifyTargetsCommand.class) {
+                answer = execute((ModifyTargetsCommand)cmd);
             } else if (clz == ModifyStoragePoolCommand.class) {
                 answer = execute((ModifyStoragePoolCommand)cmd);
             } else if (clz == DeleteStoragePoolCommand.class) {
@@ -933,7 +937,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
          */
         // Fallback to E1000 if no specific nicAdapter is passed
         VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.E1000;
-        Map details = cmd.getDetails();
+        Map<String, String> details = cmd.getDetails();
         if (details != null) {
             nicDeviceType = VirtualEthernetCardType.valueOf((String) details.get("nicAdapter"));
         }
@@ -3527,7 +3531,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
     private Answer execute(MigrateVolumeCommand cmd) {
         String volumePath = cmd.getVolumePath();
         StorageFilerTO poolTo = cmd.getPool();
-        Volume.Type volumeType = cmd.getVolumeType();
 
         if (s_logger.isInfoEnabled()) {
             s_logger.info("Executing resource MigrateVolumeCommand: " + _gson.toJson(cmd));
@@ -3608,7 +3611,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             // Consolidate VM disks.
             // In case of a linked clone VM, if VM's disks are not consolidated,
             // further volume operations on the ROOT volume such as volume snapshot etc. will result in DB inconsistencies.
-            String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext());
             if (!vmMo.consolidateVmDisks()) {
                 s_logger.warn("VM disk consolidation failed after storage migration.");
             } else {
@@ -3677,6 +3679,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         return new Answer(cmd, true, "success");
     }
 
+    protected Answer execute(ModifyTargetsCommand cmd) {
+        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
+
+        handleTargets(cmd.getAdd(), cmd.getTargets(), (HostMO)hyperHost);
+
+        return new ModifyTargetsAnswer();
+    }
+
     protected Answer execute(ModifyStoragePoolCommand cmd) {
         if (s_logger.isInfoEnabled()) {
             s_logger.info("Executing resource ModifyStoragePoolCommand: " + _gson.toJson(cmd));
@@ -3690,34 +3700,53 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 throw new Exception("Unsupported storage pool type " + pool.getType());
             }
 
-            ManagedObjectReference morDatastore = null;
-            morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid());
-            if (morDatastore == null)
-                morDatastore =
-                    hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", ""));
+            ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid());
+
+            if (morDatastore == null) {
+                morDatastore = hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", ""));
+            }
 
             assert (morDatastore != null);
+
             DatastoreSummary summary = new DatastoreMO(getServiceContext(), morDatastore).getSummary();
+
             long capacity = summary.getCapacity();
             long available = summary.getFreeSpace();
+
             Map<String, TemplateProp> tInfo = new HashMap<String, TemplateProp>();
             ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, capacity, available, tInfo);
+
             if (cmd.getAdd() && pool.getType() == StoragePoolType.VMFS) {
                 answer.setLocalDatastoreName(morDatastore.getValue());
             }
+
             return answer;
         } catch (Throwable e) {
             if (e instanceof RemoteException) {
                 s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context");
+
                 invalidateServiceContext();
             }
+
             String msg = "ModifyStoragePoolCommand failed due to " + VmwareHelper.getExceptionMessage(e);
+
             s_logger.error(msg, e);
+
             return new Answer(cmd, false, msg);
         }
     }
 
+    private void handleTargets(boolean add, List<Map<String, String>> targets, HostMO host) {
+        if (targets != null && targets.size() > 0) {
+            try {
+                _storageProcessor.handleTargetsForHost(add, targets, host);
+            }
+            catch (Exception ex) {
+                s_logger.warn(ex.getMessage());
+            }
+        }
+    }
+
     protected Answer execute(DeleteStoragePoolCommand cmd) {
         if (s_logger.isInfoEnabled()) {
             s_logger.info("Executing resource DeleteStoragePoolCommand: " + _gson.toJson(cmd));
@@ -4701,12 +4730,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
     }
 
-    private boolean isVmInCluster(String vmName) throws Exception {
-        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
-
-        return hyperHost.findVmOnPeerHyperHost(vmName) != null;
-    }
-
     protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout)
             throws Exception {
 
@@ -71,6 +71,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
 
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.Command;
+import com.cloud.agent.api.ModifyTargetsCommand;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.DataTO;
 import com.cloud.agent.api.to.DiskTO;
@@ -1911,14 +1912,77 @@ public class VmwareStorageProcessor implements StorageProcessor {
         return (int)(bytes / (1024L * 1024L));
     }
 
-    private void addRemoveInternetScsiTargetsToAllHosts(VmwareContext context, final boolean add, final List<HostInternetScsiHbaStaticTarget> lstTargets,
-        List<Pair<ManagedObjectReference, String>> lstHosts) throws Exception {
-        ExecutorService executorService = Executors.newFixedThreadPool(lstHosts.size());
+    public void handleTargetsForHost(boolean add, List<Map<String, String>> targets, HostMO host) throws Exception {
+        List<HostInternetScsiHbaStaticTarget> lstTargets = new ArrayList<HostInternetScsiHbaStaticTarget>();
+
+        for (Map<String, String> mapTarget : targets) {
+            HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget();
+
+            String targetAddress = mapTarget.get(ModifyTargetsCommand.STORAGE_HOST);
+            Integer targetPort = Integer.parseInt(mapTarget.get(ModifyTargetsCommand.STORAGE_PORT));
+            String iScsiName = trimIqn(mapTarget.get(ModifyTargetsCommand.IQN));
+
+            target.setAddress(targetAddress);
+            target.setPort(targetPort);
+            target.setIScsiName(iScsiName);
+
+            String chapName = mapTarget.get(ModifyTargetsCommand.CHAP_NAME);
+            String chapSecret = mapTarget.get(ModifyTargetsCommand.CHAP_SECRET);
+
+            if (StringUtils.isNotBlank(chapName) && StringUtils.isNotBlank(chapSecret)) {
+                HostInternetScsiHbaAuthenticationProperties auth = new HostInternetScsiHbaAuthenticationProperties();
+
+                String strAuthType = "chapRequired";
+
+                auth.setChapAuthEnabled(true);
+                auth.setChapInherited(false);
+                auth.setChapAuthenticationType(strAuthType);
+                auth.setChapName(chapName);
+                auth.setChapSecret(chapSecret);
+
+                String mutualChapName = mapTarget.get(ModifyTargetsCommand.MUTUAL_CHAP_NAME);
+                String mutualChapSecret = mapTarget.get(ModifyTargetsCommand.MUTUAL_CHAP_SECRET);
+
+                if (StringUtils.isNotBlank(mutualChapName) && StringUtils.isNotBlank(mutualChapSecret)) {
+                    auth.setMutualChapInherited(false);
+                    auth.setMutualChapAuthenticationType(strAuthType);
+                    auth.setMutualChapName(mutualChapName);
+                    auth.setMutualChapSecret(mutualChapSecret);
+                }
+
+                target.setAuthenticationProperties(auth);
+            }
+
+            lstTargets.add(target);
+        }
+
+        List<HostMO> hosts = new ArrayList<>();
+
+        hosts.add(host);
+
+        addRemoveInternetScsiTargetsToAllHosts(add, lstTargets, hosts);
+    }
+
+    private void addRemoveInternetScsiTargetsToAllHosts(VmwareContext context, final boolean add, final List<HostInternetScsiHbaStaticTarget> targets,
+            List<Pair<ManagedObjectReference, String>> hostPairs) throws Exception {
+        List<HostMO> hosts = new ArrayList<>();
+
+        for (Pair<ManagedObjectReference, String> hostPair : hostPairs) {
+            HostMO host = new HostMO(context, hostPair.first());
+
+            hosts.add(host);
+        }
+
+        addRemoveInternetScsiTargetsToAllHosts(add, targets, hosts);
+    }
+
+    private void addRemoveInternetScsiTargetsToAllHosts(final boolean add, final List<HostInternetScsiHbaStaticTarget> targets,
+            List<HostMO> hosts) throws Exception {
+        ExecutorService executorService = Executors.newFixedThreadPool(hosts.size());
 
         final List<Exception> exceptions = new ArrayList<Exception>();
 
-        for (Pair<ManagedObjectReference, String> hostPair : lstHosts) {
-            HostMO host = new HostMO(context, hostPair.first());
+        for (HostMO host : hosts) {
             HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO();
 
             boolean iScsiHbaConfigured = false;
@@ -1938,9 +2002,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
                 public void run() {
                     try {
                         if (add) {
-                            hss.addInternetScsiStaticTargets(iScsiHbaDevice, lstTargets);
+                            hss.addInternetScsiStaticTargets(iScsiHbaDevice, targets);
                         } else {
-                            hss.removeInternetScsiStaticTargets(iScsiHbaDevice, lstTargets);
+                            hss.removeInternetScsiStaticTargets(iScsiHbaDevice, targets);
                         }
 
                         hss.rescanHba(iScsiHbaDevice);
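
Note: handleTargetsForHost above expects each target map to carry the keys defined on ModifyTargetsCommand, with CHAP fields optional. The sketch below (not part of the commit; class name and all values are illustrative placeholders) shows one fully populated entry, including mutual CHAP, as the method would consume it.

// Hypothetical target map as consumed by handleTargetsForHost (values are placeholders).
import java.util.HashMap;
import java.util.Map;

import com.cloud.agent.api.ModifyTargetsCommand;

public class TargetMapExample {
    public static Map<String, String> chapTarget() {
        Map<String, String> target = new HashMap<>();

        target.put(ModifyTargetsCommand.STORAGE_HOST, "10.0.0.5");
        target.put(ModifyTargetsCommand.STORAGE_PORT, "3260");
        target.put(ModifyTargetsCommand.IQN, "iqn.2015-09.org.example:target1");

        // Optional one-way CHAP; both name and secret must be non-blank to take effect.
        target.put(ModifyTargetsCommand.CHAP_NAME, "chapUser");
        target.put(ModifyTargetsCommand.CHAP_SECRET, "chapSecretValue");

        // Optional mutual CHAP; only honoured when one-way CHAP is also present.
        target.put(ModifyTargetsCommand.MUTUAL_CHAP_NAME, "mutualUser");
        target.put(ModifyTargetsCommand.MUTUAL_CHAP_SECRET, "mutualSecretValue");

        return target;
    }
}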
@@ -113,16 +113,15 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
     protected String _guestNic;
     protected boolean _setupMultipath;
     protected String _instance;
-    private String xs620snapshothotfix = "Xenserver-Vdi-Copy-HotFix";
 
     @Inject
     protected AlertManager _alertMgr;
     @Inject
     protected AgentManager _agentMgr;
     @Inject
-    VMTemplateDao _tmpltDao;
+    private VMTemplateDao _tmpltDao;
     @Inject
-    HostPodDao _podDao;
+    private HostPodDao _podDao;
 
     protected XcpServerDiscoverer() {
     }
@@ -542,6 +541,10 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
         }
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(com.cloud.host.Host agent, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
         if (!(cmd instanceof StartupRoutingCommand)) {
@@ -629,6 +632,14 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(long agentId, long seq) {
         return false;
@@ -49,7 +49,8 @@ public final class CitrixModifyStoragePoolCommandWrapper extends CommandWrapper<
         final boolean add = command.getAdd();
         if (add) {
             try {
-                final SR sr = citrixResourceBase.getStorageRepository(conn, pool.getUuid());
+                final String srName = command.getStoragePath() != null ? command.getStoragePath() : pool.getUuid();
+                final SR sr = citrixResourceBase.getStorageRepository(conn, srName);
                 citrixResourceBase.setupHeartbeatSr(conn, sr, false);
                 final long capacity = sr.getPhysicalSize(conn);
                 final long available = capacity - sr.getPhysicalUtilisation(conn);

@@ -81,7 +82,7 @@ public final class CitrixModifyStoragePoolCommandWrapper extends CommandWrapper<
                 if (result == null || !result.split("#")[1].equals("0")) {
                     throw new CloudRuntimeException("Unable to remove heartbeat file entry for SR " + srUuid + " due to " + result);
                 }
-                return new Answer(command, true, "seccuss");
+                return new Answer(command, true, "success");
             } catch (final XenAPIException e) {
                 final String msg = "ModifyStoragePoolCommand remove XenAPIException:" + e.toString() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: "
                         + pool.getHost() + pool.getPath();
@@ -735,6 +735,11 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager,
         return null;
     }

+    @Override
+    public void processHostAdded(long hostId) {
+
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {

@@ -745,6 +750,16 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager,
         return true;
     }

+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+
+    }
+
     @Override
     public boolean isRecurring() {
         return false;
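For orientation only (not one of the hunks in this diff): the processHostAdded, processHostAboutToBeRemoved and processHostRemoved overrides added above to XcpServerDiscoverer and NuageVspManagerImpl (and below to CapacityManagerImpl) imply matching additions to the com.cloud.agent.Listener interface. The interface file itself is not shown in this part of the commit, so the following is a sketch inferred from those overrides; signatures are taken from the implementations, everything else is an assumption.

// Sketch: presumed additions to com.cloud.agent.Listener, inferred from the @Override methods in this diff.
public interface Listener {
    // ... existing callbacks such as processConnect / processTimeout ...

    // Invoked after a host has been added to a cluster.
    void processHostAdded(long hostId);

    // Invoked just before a host is removed from its cluster.
    void processHostAboutToBeRemoved(long hostId);

    // Invoked after a host has been removed from the given cluster.
    void processHostRemoved(long hostId, long clusterId);
}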
@@ -76,6 +76,11 @@ public class ElastistorHostListener implements HypervisorHostListener {
     @Inject
     HostDao _hostDao;

+    @Override
+    public boolean hostAdded(long hostId) {
+        return true;
+    }
+
     @Override
     public boolean hostConnect(long hostId, long poolId) {
         StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);

@@ -126,4 +131,13 @@ public class ElastistorHostListener implements HypervisorHostListener {
         return false;
     }

+    @Override
+    public boolean hostAboutToBeRemoved(long hostId) {
+        return true;
+    }
+
+    @Override
+    public boolean hostRemoved(long hostId, long clusterId) {
+        return true;
+    }
 }
@@ -23,13 +23,40 @@ import org.apache.log4j.Logger;
 import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;

 public class NexentaHostListener implements HypervisorHostListener {
-    private static final Logger logger = Logger.getLogger(NexentaHostListener.class);
+    private static final Logger s_logger = Logger.getLogger(NexentaHostListener.class);

+    @Override
+    public boolean hostAdded(long hostId) {
+        s_logger.trace("hostAdded(long) invoked");
+
-    public boolean hostConnect(long hostId, long poolId) {
         return true;
     }

+    @Override
+    public boolean hostConnect(long hostId, long poolId) {
+        s_logger.trace("hostConnect(long, long) invoked");
+
+        return true;
+    }
+
+    @Override
     public boolean hostDisconnected(long hostId, long poolId) {
+        s_logger.trace("hostDisconnected(long, long) invoked");
+
+        return true;
+    }
+
+    @Override
+    public boolean hostAboutToBeRemoved(long hostId) {
+        s_logger.trace("hostAboutToBeRemoved(long) invoked");
+
+        return true;
+    }
+
+    @Override
+    public boolean hostRemoved(long hostId, long clusterId) {
+        s_logger.trace("hostRemoved(long) invoked");
+
         return true;
     }
 }
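Similarly for orientation (not part of the hunks shown here): the ElastistorHostListener, NexentaHostListener and SolidFire listeners in this commit all implement three new host-lifecycle callbacks on HypervisorHostListener. The interface change itself is not included in these hunks, so this is a sketch inferred from the implementations above and below; parameter names follow the implementations and are otherwise an assumption.

// Sketch: presumed shape of HypervisorHostListener after this commit, inferred from its implementations in this diff.
public interface HypervisorHostListener {
    boolean hostAdded(long hostId);

    boolean hostConnect(long hostId, long poolId);

    boolean hostDisconnected(long hostId, long poolId);

    boolean hostAboutToBeRemoved(long hostId);

    boolean hostRemoved(long hostId, long clusterId);
}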
@@ -76,7 +76,6 @@ import com.cloud.utils.exception.CloudRuntimeException;

 public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreDriver.class);
-    private static final int s_lockTimeInSeconds = 300;
     private static final int s_lowestHypervisorSnapshotReserve = 10;

     @Inject private AccountDao _accountDao;

@@ -141,8 +140,12 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {

         GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());

-        if (!lock.lock(s_lockTimeInSeconds)) {
-            s_logger.debug("Couldn't lock the DB (in grantAccess) on the following string: " + cluster.getUuid());
+        if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) {
+            String errMsg = "Couldn't lock the DB (in grantAccess) on the following string: " + cluster.getUuid();
+
+            s_logger.debug(errMsg);
+
+            throw new CloudRuntimeException(errMsg);
         }

         try {

@@ -161,10 +164,9 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             if (vagId != null) {
                 SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId));

-                String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hosts));
                 long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, true);

-                SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), hostIqns, volumeIds);
+                SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
             }
             else {
                 SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolumeId, storagePoolId, cluster.getUuid(), hosts, _clusterDetailsDao);

@@ -196,8 +198,12 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {

         GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());

-        if (!lock.lock(s_lockTimeInSeconds)) {
-            s_logger.debug("Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid());
+        if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) {
+            String errMsg = "Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid();
+
+            s_logger.debug(errMsg);
+
+            throw new CloudRuntimeException(errMsg);
         }

         try {

@@ -206,16 +212,13 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             String vagId = clusterDetail != null ? clusterDetail.getValue() : null;

             if (vagId != null) {
-                List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
-
                 SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao);

                 SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId));

-                String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hosts));
                 long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, false);

-                SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), hostIqns, volumeIds);
+                SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
             }
         }
         finally {

@@ -701,7 +704,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     }

     @Override
-    public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) {
+    public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) {
         throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead.");
     }
@@ -136,7 +136,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
                 lClusterDefaultMinIops = Long.parseLong(clusterDefaultMinIops);
             }
         } catch (NumberFormatException ex) {
-            s_logger.warn("Cannot parse the setting of " + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS +
+            s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS +
                     ", using default value: " + lClusterDefaultMinIops +
                     ". Exception: " + ex);
         }

@@ -148,7 +148,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
                 lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops);
             }
         } catch (NumberFormatException ex) {
-            s_logger.warn("Cannot parse the setting of " + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS +
+            s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS +
                     ", using default value: " + lClusterDefaultMaxIops +
                     ". Exception: " + ex);
         }

@@ -160,7 +160,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
                 fClusterDefaultBurstIopsPercentOfMaxIops = Float.parseFloat(clusterDefaultBurstIopsPercentOfMaxIops);
             }
         } catch (NumberFormatException ex) {
-            s_logger.warn("Cannot parse the setting of " + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS +
+            s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS +
                     ", using default value: " + fClusterDefaultBurstIopsPercentOfMaxIops +
                     ". Exception: " + ex);
         }
@@ -70,6 +70,7 @@ import com.cloud.user.Account;
 import com.cloud.user.AccountDetailsDao;
 import com.cloud.user.AccountVO;
 import com.cloud.user.dao.AccountDao;
+import com.cloud.utils.db.GlobalLock;
 import com.cloud.utils.exception.CloudRuntimeException;

 public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {

@@ -178,8 +179,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
                 lMinIops = Long.parseLong(minIops);
             }
         } catch (Exception ex) {
-            s_logger.info("[ignored]"
-                    + "error getting minimals iops: " + ex.getLocalizedMessage());
+            s_logger.info("[ignored] error getting Min IOPS: " + ex.getLocalizedMessage());
         }

         try {

@@ -189,8 +189,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
                 lMaxIops = Long.parseLong(maxIops);
             }
         } catch (Exception ex) {
-            s_logger.info("[ignored]"
-                    + "error getting maximal iops: " + ex.getLocalizedMessage());
+            s_logger.info("[ignored] error getting Max IOPS: " + ex.getLocalizedMessage());
         }

         try {

@@ -200,8 +199,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
                 lBurstIops = Long.parseLong(burstIops);
             }
         } catch (Exception ex) {
-            s_logger.info("[ignored]"
-                    + "error getting iops bursts: " + ex.getLocalizedMessage());
+            s_logger.info("[ignored] error getting Burst IOPS: " + ex.getLocalizedMessage());
         }

         if (lMinIops > lMaxIops) {

@@ -255,14 +253,27 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
             parameters.setPath(iqn);
         }

-        // this adds a row in the cloud.storage_pool table for this SolidFire volume
-        DataStore dataStore = _primaryDataStoreHelper.createPrimaryDataStore(parameters);
+        ClusterVO cluster = _clusterDao.findById(clusterId);
+
+        GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
+
+        if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) {
+            String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
+
+            s_logger.debug(errMsg);
+
+            throw new CloudRuntimeException(errMsg);
+        }
+
+        DataStore dataStore = null;

-        // now that we have a DataStore (we need the id from the DataStore instance), we can create a Volume Access Group, if need be, and
-        // place the newly created volume in the Volume Access Group
         try {
+            // this adds a row in the cloud.storage_pool table for this SolidFire volume
+            dataStore = _primaryDataStoreHelper.createPrimaryDataStore(parameters);
+
+            // now that we have a DataStore (we need the id from the DataStore instance), we can create a Volume Access Group, if need be, and
+            // place the newly created volume in the Volume Access Group
             List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
-            ClusterVO cluster = _clusterDao.findById(clusterId);

             SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolume.getId(), dataStore.getId(), cluster.getUuid(), hosts, _clusterDetailsDao);

@@ -275,6 +286,10 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor

             throw new CloudRuntimeException(ex.getMessage());
         }
+        finally {
+            lock.unlock();
+            lock.releaseRef();
+        }

         return dataStore;
     }

@@ -546,7 +561,25 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
         }

         if (clusterId != null) {
-            removeVolumeFromVag(storagePool.getId(), clusterId);
+            ClusterVO cluster = _clusterDao.findById(clusterId);
+
+            GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
+
+            if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) {
+                String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
+
+                s_logger.debug(errMsg);
+
+                throw new CloudRuntimeException(errMsg);
+            }
+
+            try {
+                removeVolumeFromVag(storagePool.getId(), clusterId);
+            }
+            finally {
+                lock.unlock();
+                lock.releaseRef();
+            }
         }

         deleteSolidFireVolume(storagePool.getId());

@@ -561,16 +594,13 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
         String vagId = clusterDetail != null ? clusterDetail.getValue() : null;

         if (vagId != null) {
-            List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
-
             SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao);

             SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId));

-            String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hosts));
             long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, false);

-            SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), hostIqns, volumeIds);
+            SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
         }
     }
@@ -18,40 +18,69 @@
 */
 package org.apache.cloudstack.storage.datastore.provider;

+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 import javax.inject.Inject;

 import org.apache.log4j.Logger;

 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;

 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.ModifyStoragePoolAnswer;
 import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.ModifyTargetsCommand;
 import com.cloud.alert.AlertManager;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.dao.ClusterDao;
 import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.storage.dao.VolumeDao;
 import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;

 public class SolidFireHostListener implements HypervisorHostListener {
     private static final Logger s_logger = Logger.getLogger(SolidFireHostListener.class);

-    @Inject
-    private AgentManager _agentMgr;
-    @Inject
-    private AlertManager _alertMgr;
-    @Inject
-    private DataStoreManager _dataStoreMgr;
-    @Inject
-    private HostDao _hostDao;
-    @Inject
-    private StoragePoolHostDao storagePoolHostDao;
+    @Inject private AgentManager _agentMgr;
+    @Inject private AlertManager _alertMgr;
+    @Inject private ClusterDao _clusterDao;
+    @Inject private ClusterDetailsDao _clusterDetailsDao;
+    @Inject private DataStoreManager _dataStoreMgr;
+    @Inject private HostDao _hostDao;
+    @Inject private PrimaryDataStoreDao _storagePoolDao;
+    @Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
+    @Inject private StoragePoolHostDao storagePoolHostDao;
+    @Inject private VMInstanceDao _vmDao;
+    @Inject private VolumeDao _volumeDao;
+
+    @Override
+    public boolean hostAdded(long hostId) {
+        HostVO host = _hostDao.findById(hostId);
+
+        SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.PROVIDER_NAME,
+                _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);
+
+        handleVMware(host, true);
+
+        return true;
+    }

     @Override
     public boolean hostConnect(long hostId, long storagePoolId) {

@@ -65,33 +94,13 @@ public class SolidFireHostListener implements HypervisorHostListener {
             storagePoolHostDao.persist(storagePoolHost);
         }

-        // just want to send the ModifyStoragePoolCommand for KVM
-        if (host.getHypervisorType() != HypervisorType.KVM) {
-            return true;
+        if (host.getHypervisorType().equals(HypervisorType.XenServer)) {
+            handleXenServer(host.getClusterId(), host.getId(), storagePoolId);
         }
-
-        StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
-        ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
-
-        Answer answer = _agentMgr.easySend(hostId, cmd);
-
-        if (answer == null) {
-            throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")");
+        else if (host.getHypervisorType().equals(HypervisorType.KVM)) {
+            handleKVM(hostId, storagePoolId);
         }
-
-        if (!answer.getResult()) {
-            String msg = "Unable to attach storage pool " + storagePoolId + " to host " + hostId;
-
-            _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
-
-            throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() +
-                    " (" + storagePool.getId() + ")");
-        }
-
-        assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId;
-
-        s_logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId);

         return true;
     }

@@ -105,4 +114,171 @@ public class SolidFireHostListener implements HypervisorHostListener {

         return true;
     }

+    @Override
+    public boolean hostAboutToBeRemoved(long hostId) {
+        HostVO host = _hostDao.findById(hostId);
+
+        handleVMware(host, false);
+
+        return true;
+    }
+
+    @Override
+    public boolean hostRemoved(long hostId, long clusterId) {
+        SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.PROVIDER_NAME,
+                _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);
+
+        return true;
+    }
+
+    private void handleXenServer(long clusterId, long hostId, long storagePoolId) {
+        List<String> storagePaths = getStoragePaths(clusterId, storagePoolId);
+
+        StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
+
+        for (String storagePath : storagePaths) {
+            ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
+
+            cmd.setStoragePath(storagePath);
+
+            sendModifyStoragePoolCommand(cmd, storagePool, hostId);
+        }
+    }
+
+    private void handleVMware(HostVO host, boolean add) {
+        if (HypervisorType.VMware.equals(host.getHypervisorType())) {
+            List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(SolidFireUtil.PROVIDER_NAME);
+
+            if (storagePools != null && storagePools.size() > 0) {
+                List<Map<String, String>> targets = new ArrayList<>();
+
+                for (StoragePoolVO storagePool : storagePools) {
+                    List<Map<String, String>> targetsForClusterAndStoragePool = getTargets(host.getClusterId(), storagePool.getId());
+
+                    targets.addAll(targetsForClusterAndStoragePool);
+                }
+
+                ModifyTargetsCommand cmd = new ModifyTargetsCommand();
+
+                cmd.setAdd(add);
+                cmd.setTargets(targets);
+
+                sendModifyTargetsCommand(cmd, host.getId());
+            }
+        }
+    }
+
+    private void handleKVM(long hostId, long storagePoolId) {
+        StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
+
+        ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
+
+        sendModifyStoragePoolCommand(cmd, storagePool, hostId);
+    }
+
+    private List<String> getStoragePaths(long clusterId, long storagePoolId) {
+        List<String> storagePaths = new ArrayList<>();
+
+        // If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
+        List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null);
+
+        if (volumes != null) {
+            for (VolumeVO volume : volumes) {
+                Long instanceId = volume.getInstanceId();
+
+                if (instanceId != null) {
+                    VMInstanceVO vmInstance = _vmDao.findById(instanceId);
+
+                    Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId();
+
+                    if (hostIdForVm != null) {
+                        HostVO hostForVm = _hostDao.findById(hostIdForVm);
+
+                        if (hostForVm.getClusterId().equals(clusterId)) {
+                            storagePaths.add(volume.get_iScsiName());
+                        }
+                    }
+                }
+            }
+        }
+
+        return storagePaths;
+    }
+
+    private List<Map<String, String>> getTargets(long clusterId, long storagePoolId) {
+        List<Map<String, String>> targets = new ArrayList<>();
+
+        StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
+
+        // If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
+        List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null);
+
+        if (volumes != null) {
+            for (VolumeVO volume : volumes) {
+                Long instanceId = volume.getInstanceId();
+
+                if (instanceId != null) {
+                    VMInstanceVO vmInstance = _vmDao.findById(instanceId);
+
+                    Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId();
+
+                    if (hostIdForVm != null) {
+                        HostVO hostForVm = _hostDao.findById(hostIdForVm);
+
+                        if (hostForVm.getClusterId().equals(clusterId)) {
+                            Map<String, String> details = new HashMap<>();
+
+                            details.put(ModifyTargetsCommand.IQN, volume.get_iScsiName());
+                            details.put(ModifyTargetsCommand.STORAGE_HOST, storagePool.getHostAddress());
+                            details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));
+
+                            targets.add(details);
+                        }
+                    }
+                }
+            }
+        }
+
+        return targets;
+    }
+
+    private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
+        Answer answer = _agentMgr.easySend(hostId, cmd);
+
+        if (answer == null) {
+            throw new CloudRuntimeException("Unable to get an answer to the modify targets command");
+        }
+
+        if (!answer.getResult()) {
+            String msg = "Unable to modify targets on the following host: " + hostId;
+
+            HostVO host = _hostDao.findById(hostId);
+
+            _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg);
+
+            throw new CloudRuntimeException(msg);
+        }
+    }
+
+    private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) {
+        Answer answer = _agentMgr.easySend(hostId, cmd);
+
+        if (answer == null) {
+            throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")");
+        }
+
+        if (!answer.getResult()) {
+            String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId;
+
+            _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
+
+            throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() +
+                    " (" + storagePool.getId() + ")");
+        }
+
+        assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId;
+
+        s_logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId);
+    }
 }
@@ -18,18 +18,33 @@
 */
 package org.apache.cloudstack.storage.datastore.provider;

+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 import javax.inject.Inject;

 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
 import org.apache.log4j.Logger;

 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.ModifyStoragePoolAnswer;
 import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.ModifyTargetsCommand;
 import com.cloud.alert.AlertManager;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.StoragePoolHostVO;

@@ -37,56 +52,168 @@ import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.utils.exception.CloudRuntimeException;

 public class SolidFireSharedHostListener implements HypervisorHostListener {
-    private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class);
+    private static final Logger s_logger = Logger.getLogger(SolidFireSharedHostListener.class);

-    @Inject private AgentManager agentMgr;
-    @Inject private DataStoreManager dataStoreMgr;
-    @Inject private AlertManager alertMgr;
-    @Inject private StoragePoolHostDao storagePoolHostDao;
-    @Inject private PrimaryDataStoreDao primaryStoreDao;
+    @Inject private AgentManager _agentMgr;
+    @Inject private AlertManager _alertMgr;
+    @Inject private ClusterDao _clusterDao;
+    @Inject private ClusterDetailsDao _clusterDetailsDao;
+    @Inject private DataStoreManager _dataStoreMgr;
+    @Inject private HostDao _hostDao;
+    @Inject private PrimaryDataStoreDao _storagePoolDao;
+    @Inject private StoragePoolHostDao _storagePoolHostDao;
+    @Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
+
+    @Override
+    public boolean hostAdded(long hostId) {
+        HostVO host = _hostDao.findById(hostId);
+
+        SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.SHARED_PROVIDER_NAME,
+                _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);
+
+        handleVMware(hostId, true);
+
+        return true;
+    }

     @Override
     public boolean hostConnect(long hostId, long storagePoolId) {
-        StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
-
-        if (storagePoolHost == null) {
-            storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, "");
-
-            storagePoolHostDao.persist(storagePoolHost);
-        }
-
-        StoragePool storagePool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
+        StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
+
         ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
-        Answer answer = agentMgr.easySend(hostId, cmd);
-
-        if (answer == null) {
-            throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command for storage pool: " + storagePool.getId());
-        }
-
-        if (!answer.getResult()) {
-            String msg = "Unable to attach storage pool " + storagePoolId + " to the host " + hostId;
-
-            alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
-
-            throw new CloudRuntimeException(msg);
-        }
-
-        assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " +
-                storagePool.getId() + "; Host=" + hostId;
-
-        s_logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId);
+
+        ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId);
+
+        StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
+
+        if (storagePoolHost != null) {
+            storagePoolHost.setLocalPath(answer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
+        } else {
+            storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, answer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
+
+            _storagePoolHostDao.persist(storagePoolHost);
+        }
+
+        StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
+
+        storagePoolVO.setCapacityBytes(answer.getPoolInfo().getCapacityBytes());
+        storagePoolVO.setUsedBytes(answer.getPoolInfo().getCapacityBytes() - answer.getPoolInfo().getAvailableBytes());
+
+        _storagePoolDao.update(storagePoolId, storagePoolVO);

         return true;
     }

     @Override
     public boolean hostDisconnected(long hostId, long storagePoolId) {
-        StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
+        StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(storagePoolId, hostId);

         if (storagePoolHost != null) {
-            storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId);
+            _storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId);
         }

         return true;
     }

+    @Override
+    public boolean hostAboutToBeRemoved(long hostId) {
+        handleVMware(hostId, false);
+
+        return true;
+    }
+
+    @Override
+    public boolean hostRemoved(long hostId, long clusterId) {
+        SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.SHARED_PROVIDER_NAME,
+                _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);
+
+        return true;
+    }
+
+    private void handleVMware(long hostId, boolean add) {
+        HostVO host = _hostDao.findById(hostId);
+
+        if (HypervisorType.VMware.equals(host.getHypervisorType())) {
+            List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(SolidFireUtil.SHARED_PROVIDER_NAME);
+
+            if (storagePools != null && storagePools.size() > 0) {
+                List<Map<String, String>> targets = new ArrayList<>();
+
+                for (StoragePoolVO storagePool : storagePools) {
+                    if (storagePool.getClusterId().equals(host.getClusterId())) {
+                        long storagePoolId = storagePool.getId();
+
+                        StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN);
+
+                        String iqn = storagePoolDetail.getValue();
+
+                        storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP);
+
+                        String sVip = storagePoolDetail.getValue();
+
+                        storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT);
+
+                        String sPort = storagePoolDetail.getValue();
+
+                        Map<String, String> details = new HashMap<>();
+
+                        details.put(ModifyTargetsCommand.IQN, iqn);
+                        details.put(ModifyTargetsCommand.STORAGE_HOST, sVip);
+                        details.put(ModifyTargetsCommand.STORAGE_PORT, sPort);
+
+                        targets.add(details);
+                    }
+                }
+
+                if (targets.size() > 0) {
+                    ModifyTargetsCommand cmd = new ModifyTargetsCommand();
+
+                    cmd.setAdd(add);
+                    cmd.setTargets(targets);
+
+                    sendModifyTargetsCommand(cmd, hostId);
+                }
+            }
+        }
+    }
+
+    private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
+        Answer answer = _agentMgr.easySend(hostId, cmd);
+
+        if (answer == null) {
+            throw new CloudRuntimeException("Unable to get an answer to the modify targets command");
+        }
+
+        if (!answer.getResult()) {
+            String msg = "Unable to modify targets on the following host: " + hostId;
+
+            HostVO host = _hostDao.findById(hostId);
+
+            _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg);
+
+            throw new CloudRuntimeException(msg);
+        }
+    }
+
+    private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) {
+        Answer answer = _agentMgr.easySend(hostId, cmd);
+
+        if (answer == null) {
+            throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command for storage pool: " + storagePool.getId());
+        }
+
+        if (!answer.getResult()) {
+            String msg = "Unable to attach storage pool " + storagePool.getId() + " to the host " + hostId;
+
+            _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
+
+            throw new CloudRuntimeException(msg);
+        }
+
+        assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " +
+                storagePool.getId() + "; Host = " + hostId;
+
+        s_logger.info("Connection established between storage pool " + storagePool + " and host " + hostId);
+
+        return (ModifyStoragePoolAnswer)answer;
+    }
 }
@ -28,6 +28,7 @@ import java.security.SecureRandom;
|
|||||||
import java.security.cert.CertificateException;
|
import java.security.cert.CertificateException;
|
||||||
import java.security.cert.X509Certificate;
|
import java.security.cert.X509Certificate;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
import java.util.Arrays;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
@ -58,14 +59,19 @@ import com.google.gson.GsonBuilder;
|
|||||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
|
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
|
||||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||||
|
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||||
import org.apache.cloudstack.utils.security.SSLUtils;
|
import org.apache.cloudstack.utils.security.SSLUtils;
|
||||||
|
|
||||||
import com.cloud.dc.ClusterDetailsDao;
|
import com.cloud.dc.ClusterDetailsDao;
|
||||||
import com.cloud.dc.ClusterDetailsVO;
|
import com.cloud.dc.ClusterDetailsVO;
|
||||||
|
import com.cloud.dc.ClusterVO;
|
||||||
|
import com.cloud.dc.dao.ClusterDao;
|
||||||
import com.cloud.host.Host;
|
import com.cloud.host.Host;
|
||||||
import com.cloud.host.HostVO;
|
import com.cloud.host.HostVO;
|
||||||
|
import com.cloud.host.dao.HostDao;
|
||||||
import com.cloud.user.AccountDetailVO;
|
import com.cloud.user.AccountDetailVO;
|
||||||
import com.cloud.user.AccountDetailsDao;
|
import com.cloud.user.AccountDetailsDao;
|
||||||
|
import com.cloud.utils.db.GlobalLock;
|
||||||
import com.cloud.utils.exception.CloudRuntimeException;
|
import com.cloud.utils.exception.CloudRuntimeException;
|
||||||
|
|
||||||
public class SolidFireUtil {
|
public class SolidFireUtil {
|
||||||
@ -73,6 +79,8 @@ public class SolidFireUtil {
|
|||||||
public static final String PROVIDER_NAME = "SolidFire";
|
public static final String PROVIDER_NAME = "SolidFire";
|
||||||
public static final String SHARED_PROVIDER_NAME = "SolidFireShared";
|
public static final String SHARED_PROVIDER_NAME = "SolidFireShared";
|
||||||
|
|
||||||
|
public static final int s_lockTimeInSeconds = 300;
|
||||||
|
|
||||||
public static final String LOG_PREFIX = "SolidFire: ";
|
public static final String LOG_PREFIX = "SolidFire: ";
|
||||||
|
|
||||||
public static final String MANAGEMENT_VIP = "mVip";
|
public static final String MANAGEMENT_VIP = "mVip";
|
||||||
@ -124,6 +132,22 @@ public class SolidFireUtil {
|
|||||||
private final String _clusterAdminPassword;
|
private final String _clusterAdminPassword;
|
||||||
|
|
||||||
public SolidFireConnection(String managementVip, int managementPort, String clusterAdminUsername, String clusterAdminPassword) {
|
public SolidFireConnection(String managementVip, int managementPort, String clusterAdminUsername, String clusterAdminPassword) {
|
||||||
|
if (managementVip == null) {
|
||||||
|
throw new CloudRuntimeException("The management VIP cannot be 'null'.");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (managementPort <= 0) {
|
||||||
|
throw new CloudRuntimeException("The management port must be a positive integer.");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (clusterAdminUsername == null) {
|
||||||
|
throw new CloudRuntimeException("The cluster admin username cannot be 'null'.");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (clusterAdminPassword == null) {
|
||||||
|
throw new CloudRuntimeException("The cluster admin password cannot be 'null'.");
|
||||||
|
}
|
||||||
|
|
||||||
_managementVip = managementVip;
|
_managementVip = managementVip;
|
||||||
_managementPort = managementPort;
|
_managementPort = managementPort;
|
||||||
_clusterAdminUsername = clusterAdminUsername;
|
_clusterAdminUsername = clusterAdminUsername;
|
||||||
@ -145,6 +169,22 @@ public class SolidFireUtil {
|
|||||||
public String getClusterAdminPassword() {
|
public String getClusterAdminPassword() {
|
||||||
return _clusterAdminPassword;
|
return _clusterAdminPassword;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int hashCode() {
|
||||||
|
return _managementVip.hashCode();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean equals(Object obj) {
|
||||||
|
if (!(obj instanceof SolidFireConnection)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
SolidFireConnection sfConnection = (SolidFireConnection)obj;
|
||||||
|
|
||||||
|
return _managementVip.equals(sfConnection.getManagementVip());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static SolidFireConnection getSolidFireConnection(long storagePoolId, StoragePoolDetailsDao storagePoolDetailsDao) {
|
public static SolidFireConnection getSolidFireConnection(long storagePoolId, StoragePoolDetailsDao storagePoolDetailsDao) {
|
||||||
@@ -238,6 +278,58 @@ public class SolidFireUtil {
         }
     }
 
+    public static void hostAddedToOrRemovedFromCluster(long hostId, long clusterId, boolean added, String storageProvider,
+            ClusterDao clusterDao, ClusterDetailsDao clusterDetailsDao, PrimaryDataStoreDao storagePoolDao, StoragePoolDetailsDao storagePoolDetailsDao, HostDao hostDao) {
+        ClusterVO cluster = clusterDao.findById(clusterId);
+
+        GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
+
+        if (!lock.lock(s_lockTimeInSeconds)) {
+            String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
+
+            s_logger.debug(errMsg);
+
+            throw new CloudRuntimeException(errMsg);
+        }
+
+        try {
+            List<StoragePoolVO> storagePools = storagePoolDao.findPoolsByProvider(storageProvider);
+
+            if (storagePools != null && storagePools.size() > 0) {
+                List<SolidFireUtil.SolidFireConnection> sfConnections = new ArrayList<SolidFireUtil.SolidFireConnection>();
+
+                for (StoragePoolVO storagePool : storagePools) {
+                    ClusterDetailsVO clusterDetail = clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePool.getId()));
+
+                    String vagId = clusterDetail != null ? clusterDetail.getValue() : null;
+
+                    if (vagId != null) {
+                        SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePool.getId(), storagePoolDetailsDao);
+
+                        if (!sfConnections.contains(sfConnection)) {
+                            sfConnections.add(sfConnection);
+
+                            SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId));
+
+                            List<HostVO> hostsToAddOrRemove = new ArrayList<>();
+                            HostVO hostToAddOrRemove = hostDao.findByIdIncludingRemoved(hostId);
+
+                            hostsToAddOrRemove.add(hostToAddOrRemove);
+
+                            String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hostsToAddOrRemove), added);
+
+                            SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), hostIqns, sfVag.getVolumeIds());
+                        }
+                    }
+                }
+            }
+        }
+        finally {
+            lock.unlock();
+            lock.releaseRef();
+        }
+    }
+
     public static long placeVolumeInVolumeAccessGroup(SolidFireConnection sfConnection, long sfVolumeId, long storagePoolId,
             String vagUuid, List<HostVO> hosts, ClusterDetailsDao clusterDetailsDao) {
         if (hosts == null || hosts.isEmpty()) {
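Note: hostAddedToOrRemovedFromCluster(...) above is a utility meant to be driven from storage-provider code that learns about cluster membership changes. A minimal, hypothetical calling sketch follows; the surrounding listener, the injected DAO fields and the provider-name string are assumptions for illustration, not part of this commit:

    // Hypothetical glue code inside a SolidFire host-listener callback.
    HostVO host = _hostDao.findById(hostId);   // host that triggered the event
    long clusterId = host.getClusterId();      // cluster whose volume access groups must be updated
    boolean added = true;                      // true = host joined the cluster, false = host is leaving

    SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, added, "SolidFire",
            _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);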
@@ -264,8 +356,7 @@ public class SolidFireUtil {
 
             long[] volumeIds = getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, true);
 
-            SolidFireUtil.modifySolidFireVag(sfConnection, lVagId,
-                sfVag.getInitiators(), volumeIds);
+            SolidFireUtil.modifySolidFireVag(sfConnection, lVagId, sfVag.getInitiators(), volumeIds);
         }
 
         ClusterDetailsVO clusterDetail = new ClusterDetailsVO(hosts.get(0).getClusterId(), getVagKey(storagePoolId), String.valueOf(lVagId));
@@ -289,20 +380,34 @@ public class SolidFireUtil {
         return true;
     }
 
-    public static String[] getNewHostIqns(String[] currentIqns, String[] newIqns) {
-        List<String> lstIqns = new ArrayList<String>();
-
-        if (currentIqns != null) {
-            for (String currentIqn : currentIqns) {
-                lstIqns.add(currentIqn);
-            }
-        }
-
-        if (newIqns != null) {
-            for (String newIqn : newIqns) {
-                if (!lstIqns.contains(newIqn)) {
-                    lstIqns.add(newIqn);
-                }
-            }
-        }
-
+    public static String[] getNewHostIqns(String[] iqns, String[] iqnsToAddOrRemove, boolean add) {
+        if (add) {
+            return getNewHostIqnsAdd(iqns, iqnsToAddOrRemove);
+        }
+
+        return getNewHostIqnsRemove(iqns, iqnsToAddOrRemove);
+    }
+
+    private static String[] getNewHostIqnsAdd(String[] iqns, String[] iqnsToAdd) {
+        List<String> lstIqns = iqns != null ? new ArrayList<>(Arrays.asList(iqns)) : new ArrayList<String>();
+
+        if (iqnsToAdd != null) {
+            for (String iqnToAdd : iqnsToAdd) {
+                if (!lstIqns.contains(iqnToAdd)) {
+                    lstIqns.add(iqnToAdd);
+                }
+            }
+        }
+
+        return lstIqns.toArray(new String[0]);
+    }
+
+    private static String[] getNewHostIqnsRemove(String[] iqns, String[] iqnsToRemove) {
+        List<String> lstIqns = iqns != null ? new ArrayList<>(Arrays.asList(iqns)) : new ArrayList<String>();
+
+        if (iqnsToRemove != null) {
+            for (String iqnToRemove : iqnsToRemove) {
+                lstIqns.remove(iqnToRemove);
+            }
+        }
+
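For reference, the reworked getNewHostIqns(...) helper above now splits into an add path (union without duplicates) and a remove path (drop matching entries). A short illustration with assumed sample IQNs, not taken from the commit:

    String[] current = { "iqn.2015-09.host1", "iqn.2015-09.host2" };
    String[] joining = { "iqn.2015-09.host3" };

    // add == true: host3 is appended once -> host1, host2, host3
    String[] afterAdd = SolidFireUtil.getNewHostIqns(current, joining, true);

    // add == false: the same IQNs are removed again -> host1, host2
    String[] afterRemove = SolidFireUtil.getNewHostIqns(afterAdd, joining, false);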
@@ -981,6 +981,10 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
         // TODO Auto-generated method stub
@@ -993,6 +997,14 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         // TODO Auto-generated method stub
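The processHostAdded/processHostAboutToBeRemoved/processHostRemoved stubs added here (and in the other listeners below) are @Override implementations, so the com.cloud.agent.Listener contract presumably gains three callbacks along these lines; the declarations are inferred from the stubs rather than copied from the commit:

    // Inferred additions to com.cloud.agent.Listener
    void processHostAdded(long hostId);
    void processHostAboutToBeRemoved(long hostId);
    void processHostRemoved(long hostId, long clusterId);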
@@ -55,6 +55,10 @@ public class ComputeCapacityListener implements Listener {
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host server, StartupCommand startup, boolean forRebalance) throws ConnectionException {
         if (!(startup instanceof StartupRoutingCommand)) {
@@ -68,6 +72,14 @@ public class ComputeCapacityListener implements Listener {
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         return false;
@@ -58,6 +58,10 @@ public class StorageCapacityListener implements Listener {
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host server, StartupCommand startup, boolean forRebalance) throws ConnectionException {
 
@@ -81,6 +85,14 @@ public class StorageCapacityListener implements Listener {
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         return false;
@@ -63,6 +63,10 @@ public class ConsoleProxyListener implements Listener {
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) {
         _proxyMgr.onAgentConnect(host, cmd);
@@ -78,6 +82,14 @@ public class ConsoleProxyListener implements Listener {
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(long agentId, long seq) {
         return true;
@@ -899,6 +899,10 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
         if (!(cmd instanceof StartupRoutingCommand)) {
@@ -920,6 +924,14 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         // TODO Auto-generated method stub
@@ -85,6 +85,10 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) {
     }
@@ -95,6 +99,14 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         // TODO Auto-generated method stub
@@ -483,6 +483,10 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage
         return true;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) {
         if (cmd instanceof StartupTrafficMonitorCommand) {
@@ -498,6 +502,14 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage
         return;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(long agentId, long seq) {
         return true;
@@ -40,12 +40,10 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
 public class SshKeysDistriMonitor implements Listener {
     private static final Logger s_logger = Logger.getLogger(SshKeysDistriMonitor.class);
     AgentManager _agentMgr;
-    private final HostDao _hostDao;
     private ConfigurationDao _configDao;
 
     public SshKeysDistriMonitor(AgentManager mgr, HostDao host, ConfigurationDao config) {
-        this._agentMgr = mgr;
-        _hostDao = host;
+        _agentMgr = mgr;
         _configDao = config;
     }
 
@@ -67,6 +65,18 @@ public class SshKeysDistriMonitor implements Listener {
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
         if (cmd instanceof StartupRoutingCommand) {
@@ -251,159 +251,98 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V
 Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualMachine> {
     private static final Logger s_logger = Logger.getLogger(VirtualNetworkApplianceManagerImpl.class);
 
-    @Inject
-    EntityManager _entityMgr;
-    @Inject
-    DataCenterDao _dcDao = null;
-    @Inject
-    VlanDao _vlanDao = null;
-    @Inject
-    FirewallRulesDao _rulesDao = null;
-    @Inject
-    LoadBalancerDao _loadBalancerDao = null;
-    @Inject
-    LoadBalancerVMMapDao _loadBalancerVMMapDao = null;
-    @Inject
-    IPAddressDao _ipAddressDao = null;
-    @Inject
-    VMTemplateDao _templateDao = null;
-    @Inject
-    DomainRouterDao _routerDao = null;
-    @Inject
-    UserDao _userDao = null;
-    @Inject
-    UserStatisticsDao _userStatsDao = null;
-    @Inject
-    HostDao _hostDao = null;
-    @Inject
-    ConfigurationDao _configDao;
-    @Inject
-    HostPodDao _podDao = null;
-    @Inject
-    UserStatsLogDao _userStatsLogDao = null;
-    @Inject
-    AgentManager _agentMgr;
-    @Inject
-    AlertManager _alertMgr;
-    @Inject
-    AccountManager _accountMgr;
-    @Inject
-    ConfigurationManager _configMgr;
-    @Inject
-    ConfigurationServer _configServer;
-    @Inject
-    ServiceOfferingDao _serviceOfferingDao = null;
-    @Inject
-    UserVmDao _userVmDao;
-    @Inject
-    VMInstanceDao _vmDao;
-    @Inject
-    NetworkOfferingDao _networkOfferingDao = null;
-    @Inject
-    GuestOSDao _guestOSDao = null;
-    @Inject
-    NetworkOrchestrationService _networkMgr;
-    @Inject
-    NetworkModel _networkModel;
-    @Inject
-    VirtualMachineManager _itMgr;
-    @Inject
-    VpnUserDao _vpnUsersDao;
-    @Inject
-    RulesManager _rulesMgr;
-    @Inject
-    NetworkDao _networkDao;
-    @Inject
-    LoadBalancingRulesManager _lbMgr;
-    @Inject
-    PortForwardingRulesDao _pfRulesDao;
-    @Inject
-    RemoteAccessVpnDao _vpnDao;
-    @Inject
-    NicDao _nicDao;
-    @Inject
-    NicIpAliasDao _nicIpAliasDao;
-    @Inject
-    VolumeDao _volumeDao = null;
-    @Inject
-    UserVmDetailsDao _vmDetailsDao;
-    @Inject
-    ClusterDao _clusterDao;
-    @Inject
-    ResourceManager _resourceMgr;
-    @Inject
-    PhysicalNetworkServiceProviderDao _physicalProviderDao;
-    @Inject
-    VirtualRouterProviderDao _vrProviderDao;
-    @Inject
-    ManagementServerHostDao _msHostDao;
-    @Inject
-    Site2SiteCustomerGatewayDao _s2sCustomerGatewayDao;
-    @Inject
-    Site2SiteVpnGatewayDao _s2sVpnGatewayDao;
-    @Inject
-    Site2SiteVpnConnectionDao _s2sVpnConnectionDao;
-    @Inject
-    Site2SiteVpnManager _s2sVpnMgr;
-    @Inject
-    UserIpv6AddressDao _ipv6Dao;
-    @Inject
-    NetworkService _networkSvc;
-    @Inject
-    IpAddressManager _ipAddrMgr;
-    @Inject
-    ConfigDepot _configDepot;
-    @Inject
-    MonitoringServiceDao _monitorServiceDao;
-    @Inject
-    AsyncJobManager _asyncMgr;
-    @Inject
-    protected VpcDao _vpcDao;
-    @Inject
-    protected ApiAsyncJobDispatcher _asyncDispatcher;
-    @Inject
-    OpRouterMonitorServiceDao _opRouterMonitorServiceDao;
+    @Inject private EntityManager _entityMgr;
+    @Inject private DataCenterDao _dcDao;
+    @Inject protected VlanDao _vlanDao;
+    @Inject private FirewallRulesDao _rulesDao;
+    @Inject private LoadBalancerDao _loadBalancerDao;
+    @Inject private LoadBalancerVMMapDao _loadBalancerVMMapDao;
+    @Inject protected IPAddressDao _ipAddressDao;
+    @Inject private VMTemplateDao _templateDao;
+    @Inject protected DomainRouterDao _routerDao;
+    @Inject private UserDao _userDao;
+    @Inject protected UserStatisticsDao _userStatsDao;
+    @Inject private HostDao _hostDao;
+    @Inject private ConfigurationDao _configDao;
+    @Inject private HostPodDao _podDao;
+    @Inject private UserStatsLogDao _userStatsLogDao;
+    @Inject protected AgentManager _agentMgr;
+    @Inject private AlertManager _alertMgr;
+    @Inject private AccountManager _accountMgr;
+    @Inject private ConfigurationManager _configMgr;
+    @Inject private ConfigurationServer _configServer;
+    @Inject private ServiceOfferingDao _serviceOfferingDao;
+    @Inject private UserVmDao _userVmDao;
+    @Inject private VMInstanceDao _vmDao;
+    @Inject private NetworkOfferingDao _networkOfferingDao;
+    @Inject private GuestOSDao _guestOSDao;
+    @Inject private NetworkOrchestrationService _networkMgr;
+    @Inject protected NetworkModel _networkModel;
+    @Inject protected VirtualMachineManager _itMgr;
+    @Inject private VpnUserDao _vpnUsersDao;
+    @Inject private RulesManager _rulesMgr;
+    @Inject protected NetworkDao _networkDao;
+    @Inject private LoadBalancingRulesManager _lbMgr;
+    @Inject private PortForwardingRulesDao _pfRulesDao;
+    @Inject protected RemoteAccessVpnDao _vpnDao;
+    @Inject protected NicDao _nicDao;
+    @Inject private NicIpAliasDao _nicIpAliasDao;
+    @Inject private VolumeDao _volumeDao;
+    @Inject private UserVmDetailsDao _vmDetailsDao;
+    @Inject private ClusterDao _clusterDao;
+    @Inject private ResourceManager _resourceMgr;
+    @Inject private PhysicalNetworkServiceProviderDao _physicalProviderDao;
+    @Inject protected VirtualRouterProviderDao _vrProviderDao;
+    @Inject private ManagementServerHostDao _msHostDao;
+    @Inject private Site2SiteCustomerGatewayDao _s2sCustomerGatewayDao;
+    @Inject private Site2SiteVpnGatewayDao _s2sVpnGatewayDao;
+    @Inject private Site2SiteVpnConnectionDao _s2sVpnConnectionDao;
+    @Inject private Site2SiteVpnManager _s2sVpnMgr;
+    @Inject private UserIpv6AddressDao _ipv6Dao;
+    @Inject private NetworkService _networkSvc;
+    @Inject private IpAddressManager _ipAddrMgr;
+    @Inject private ConfigDepot _configDepot;
+    @Inject private MonitoringServiceDao _monitorServiceDao;
+    @Inject private AsyncJobManager _asyncMgr;
+    @Inject protected VpcDao _vpcDao;
+    @Inject protected ApiAsyncJobDispatcher _asyncDispatcher;
+    @Inject private OpRouterMonitorServiceDao _opRouterMonitorServiceDao;
 
-    @Inject
-    protected NetworkTopologyContext _networkTopologyContext;
+    @Inject protected NetworkTopologyContext _networkTopologyContext;
 
     @Autowired
     @Qualifier("networkHelper")
     protected NetworkHelper _nwHelper;
 
-    @Inject
-    protected RouterControlHelper _routerControlHelper;
+    @Inject protected RouterControlHelper _routerControlHelper;
 
-    @Inject
-    protected CommandSetupHelper _commandSetupHelper;
-    @Inject
-    protected RouterDeploymentDefinitionBuilder _routerDeploymentManagerBuilder;
+    @Inject protected CommandSetupHelper _commandSetupHelper;
+    @Inject protected RouterDeploymentDefinitionBuilder _routerDeploymentManagerBuilder;
 
-    int _routerRamSize;
-    int _routerCpuMHz;
-    int _retry = 2;
-    String _mgmtCidr;
+    private int _routerRamSize;
+    private int _routerCpuMHz;
+    private String _mgmtCidr;
 
-    int _routerStatsInterval = 300;
-    int _routerCheckInterval = 30;
-    int _rvrStatusUpdatePoolSize = 10;
+    private int _routerStatsInterval = 300;
+    private int _routerCheckInterval = 30;
+    private int _rvrStatusUpdatePoolSize = 10;
     private String _dnsBasicZoneUpdates = "all";
-    private final Set<String> _guestOSNeedGatewayOnNonDefaultNetwork = new HashSet<String>();
+    private final Set<String> _guestOSNeedGatewayOnNonDefaultNetwork = new HashSet<>();
 
     private boolean _disableRpFilter = false;
-    int _routerExtraPublicNics = 2;
+    private int _routerExtraPublicNics = 2;
     private int _usageAggregationRange = 1440;
     private String _usageTimeZone = "GMT";
     private final long mgmtSrvrId = MacAddress.getMacAddress().toLong();
     private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds
     private boolean _dailyOrHourly = false;
 
-    ScheduledExecutorService _executor;
-    ScheduledExecutorService _checkExecutor;
-    ScheduledExecutorService _networkStatsUpdateExecutor;
-    ExecutorService _rvrStatusUpdateExecutor;
+    private ScheduledExecutorService _executor;
+    private ScheduledExecutorService _checkExecutor;
+    private ScheduledExecutorService _networkStatsUpdateExecutor;
+    private ExecutorService _rvrStatusUpdateExecutor;
 
-    BlockingQueue<Long> _vrUpdateQueue = null;
+    private BlockingQueue<Long> _vrUpdateQueue;
 
     @Override
     public VirtualRouter destroyRouter(final long routerId, final Account caller, final Long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException {
@@ -586,10 +525,7 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
             }
         }
 
-        String value = configs.get("start.retry");
-        _retry = NumbersUtil.parseInt(value, 2);
-
-        value = configs.get("router.stats.interval");
+        String value = configs.get("router.stats.interval");
         _routerStatsInterval = NumbersUtil.parseInt(value, 300);
 
         value = configs.get("router.check.interval");
@@ -1591,9 +1527,6 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
     protected StringBuilder createRedundantRouterArgs(final NicProfile nic, final DomainRouterVO router) {
         final StringBuilder buf = new StringBuilder();
 
-        final long networkId = nic.getNetworkId();
-        final NetworkVO network = _networkDao.findById(networkId);
-
         final boolean isRedundant = router.getIsRedundantRouter();
         if (isRedundant) {
             buf.append(" redundant_router=1");
@@ -2361,6 +2294,10 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
         return false;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(final Host host, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException {
         final List<DomainRouterVO> routers = _routerDao.listIsolatedByHostId(host.getId());
@@ -2395,6 +2332,14 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(final long agentId, final long seq) {
         return false;
@@ -81,7 +81,7 @@ public class SecurityGroupListener implements Listener {
     @Override
     public boolean processAnswers(long agentId, long seq, Answer[] answers) {
         List<Long> affectedVms = new ArrayList<Long>();
-        int commandNum = 0;
         for (Answer ans : answers) {
             if (ans instanceof SecurityGroupRuleAnswer) {
                 SecurityGroupRuleAnswer ruleAnswer = (SecurityGroupRuleAnswer)ans;
@@ -106,7 +106,7 @@ public class SecurityGroupListener implements Listener {
                 }
             }
         }
-        commandNum++;
         if (_workTracker != null)
             _workTracker.processAnswers(agentId, seq, answers);
     }
@@ -151,6 +151,10 @@ public class SecurityGroupListener implements Listener {
         return processed;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) {
         if (s_logger.isInfoEnabled())
@@ -188,6 +192,14 @@ public class SecurityGroupListener implements Listener {
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(long agentId, long seq) {
         if (_workTracker != null) {
@@ -771,6 +771,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
                 _hostTagsDao.persist(host.getId(), hostTags);
             }
             hosts.add(host);
+
+            _agentMgr.notifyMonitorsOfNewlyAddedHost(host.getId());
+
             return hosts;
         }
     }
@@ -843,10 +846,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
             return true;
         }
 
+        long clusterId = host.getClusterId();
+
+        _agentMgr.notifyMonitorsOfHostAboutToBeRemoved(host.getId());
 
         Transaction.execute(new TransactionCallbackNoReturn() {
             @Override
            public void doInTransactionWithoutResult(final TransactionStatus status) {
 
                 _dcDao.releasePrivateIpAddress(host.getPrivateIpAddress(), host.getDataCenterId(), null);
                 _agentMgr.disconnectWithoutInvestigation(hostId, Status.Event.Remove);
 
@@ -920,6 +926,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
             }
         });
 
+        _agentMgr.notifyMonitorsOfRemovedHost(host.getId(), clusterId);
+
         return true;
     }
 
@@ -1570,17 +1578,35 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
         return true;
     }
 
+    private HostVO getNewHost(StartupCommand[] startupCommands) {
+        StartupCommand startupCommand = startupCommands[0];
+
+        HostVO host = findHostByGuid(startupCommand.getGuid());
+
+        if (host != null) {
+            return host;
+        }
+
+        host = findHostByGuid(startupCommand.getGuidWithoutResource());
+
+        if (host != null) {
+            return host;
+        }
+
+        return null;
+    }
+
     protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource resource, final Map<String, String> details, List<String> hostTags,
             final ResourceStateAdapter.Event stateEvent) {
-        final StartupCommand startup = cmds[0];
-        HostVO host = findHostByGuid(startup.getGuid());
-        boolean isNew = false;
-        if (host == null) {
-            host = findHostByGuid(startup.getGuidWithoutResource());
-        }
+        boolean newHost = false;
+
+        StartupCommand startup = cmds[0];
+
+        HostVO host = getNewHost(cmds);
+
         if (host == null) {
             host = new HostVO(startup.getGuid());
-            isNew = true;
+            newHost = true;
         }
 
         String dataCenter = startup.getDataCenter();
@@ -1695,7 +1721,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
             throw new CloudRuntimeException("No resource state adapter response");
         }
 
-        if (isNew) {
+        if (newHost) {
             host = _hostDao.persist(host);
         } else {
             _hostDao.update(host.getId(), host);
@@ -1794,9 +1820,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
             }
         }
 
+        // find out if the host we want to connect to is new (so we can send an event)
+        boolean newHost = getNewHost(cmds) == null;
+
         host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT);
 
         if (host != null) {
-            created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance);
+            created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance, newHost);
             /* reload myself from database */
             host = _hostDao.findById(host.getId());
         }
@@ -1866,12 +1896,19 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
         }
 
         host = null;
+        boolean newHost = false;
+
         final GlobalLock addHostLock = GlobalLock.getInternLock("AddHostLock");
 
         try {
             if (addHostLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
                 // to safely determine first host in cluster in multi-MS scenario
                 try {
+                    // find out if the host we want to connect to is new (so we can send an event)
+                    newHost = getNewHost(cmds) == null;
+
                     host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT);
 
                     if (host != null) {
                         // if first host in cluster no need to defer agent creation
                         deferAgentCreation = !isFirstHostInCluster(host);
@@ -1886,7 +1923,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
 
         if (host != null) {
             if (!deferAgentCreation) { // if first host in cluster then
-                created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance);
+                created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance, newHost);
                 host = _hostDao.findById(host.getId()); // reload
             } else {
                 host = _hostDao.findById(host.getId()); // reload
@@ -152,6 +152,14 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean isRecurring() {
         return false;
@@ -167,6 +175,10 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
         return false;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
     }
@@ -18,8 +18,6 @@ package com.cloud.storage;
 
 import javax.inject.Inject;
 
-import org.apache.log4j.Logger;
-
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 
 import com.cloud.agent.Listener;
@@ -39,17 +37,11 @@ import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.utils.db.DB;
 
 public class LocalStoragePoolListener implements Listener {
-    private final static Logger s_logger = Logger.getLogger(LocalStoragePoolListener.class);
-    @Inject
-    PrimaryDataStoreDao _storagePoolDao;
-    @Inject
-    StoragePoolHostDao _storagePoolHostDao;
-    @Inject
-    CapacityDao _capacityDao;
-    @Inject
-    StorageManager _storageMgr;
-    @Inject
-    DataCenterDao _dcDao;
+    @Inject private PrimaryDataStoreDao _storagePoolDao;
+    @Inject private StoragePoolHostDao _storagePoolHostDao;
+    @Inject private CapacityDao _capacityDao;
+    @Inject private StorageManager _storageMgr;
+    @Inject private DataCenterDao _dcDao;
 
     @Override
     public int getTimeout() {
@@ -71,6 +63,10 @@ public class LocalStoragePoolListener implements Listener {
         return false;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     @DB
     public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
@@ -102,6 +98,14 @@ public class LocalStoragePoolListener implements Listener {
         return false;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(long agentId, long seq) {
         return false;
@@ -458,7 +458,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800);
         s_logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds");
 
-        _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao), true, false, true);
+        _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _dataStoreProviderMgr), true, false, true);
 
         String value = _configDao.getValue(Config.StorageTemplateCleanupEnabled.key());
         _templateCleanupEnabled = (value == null ? true : Boolean.parseBoolean(value));
@@ -270,9 +270,7 @@ public class DownloadListener implements Listener {
     }
 
     @Override
-    public boolean processDisconnect(long agentId, com.cloud.host.Status state) {
-        setDisconnected();
-        return true;
+    public void processHostAdded(long hostId) {
     }
 
     @Override
@@ -310,6 +308,20 @@ public class DownloadListener implements Listener {
         }
     }
 
+    @Override
+    public boolean processDisconnect(long agentId, com.cloud.host.Status state) {
+        setDisconnected();
+        return true;
+    }
+
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     public void setCommand(DownloadCommand cmd) {
         this._cmd = cmd;
     }
@@ -22,6 +22,10 @@ import javax.inject.Inject;
 
 import org.apache.log4j.Logger;
 
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 
@@ -46,13 +50,14 @@ public class StoragePoolMonitor implements Listener {
     private static final Logger s_logger = Logger.getLogger(StoragePoolMonitor.class);
     private final StorageManagerImpl _storageManager;
     private final PrimaryDataStoreDao _poolDao;
+    private DataStoreProviderManager _dataStoreProviderMgr;
     @Inject
     OCFS2Manager _ocfs2Mgr;
 
-    public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao) {
-        this._storageManager = mgr;
-        this._poolDao = poolDao;
+    public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao, DataStoreProviderManager dataStoreProviderMgr) {
+        _storageManager = mgr;
+        _poolDao = poolDao;
+        _dataStoreProviderMgr = dataStoreProviderMgr;
     }
 
     @Override
@@ -66,8 +71,25 @@ public class StoragePoolMonitor implements Listener {
     }
 
     @Override
-    public synchronized boolean processDisconnect(long agentId, Status state) {
-        return true;
+    public void processHostAdded(long hostId) {
+        List<DataStoreProvider> providers = _dataStoreProviderMgr.getProviders();
+
+        if (providers != null) {
+            for (DataStoreProvider provider : providers) {
+                if (provider instanceof PrimaryDataStoreProvider) {
+                    try {
+                        HypervisorHostListener hypervisorHostListener = provider.getHostListener();
+
+                        if (hypervisorHostListener != null) {
+                            hypervisorHostListener.hostAdded(hostId);
+                        }
+                    }
+                    catch (Exception ex) {
+                        s_logger.error("hostAdded(long) failed for storage provider " + provider.getName(), ex);
+                    }
+                }
+            }
+        }
     }
 
     @Override
@@ -111,6 +133,55 @@ public class StoragePoolMonitor implements Listener {
         }
     }
 
+    @Override
+    public synchronized boolean processDisconnect(long agentId, Status state) {
+        return true;
+    }
+
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+        List<DataStoreProvider> providers = _dataStoreProviderMgr.getProviders();
+
+        if (providers != null) {
+            for (DataStoreProvider provider : providers) {
+                if (provider instanceof PrimaryDataStoreProvider) {
+                    try {
+                        HypervisorHostListener hypervisorHostListener = provider.getHostListener();
+
+                        if (hypervisorHostListener != null) {
+                            hypervisorHostListener.hostAboutToBeRemoved(hostId);
+                        }
+                    }
+                    catch (Exception ex) {
+                        s_logger.error("hostAboutToBeRemoved(long) failed for storage provider " + provider.getName(), ex);
+                    }
+                }
+            }
+        }
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+        List<DataStoreProvider> providers = _dataStoreProviderMgr.getProviders();
+
+        if (providers != null) {
+            for (DataStoreProvider provider : providers) {
+                if (provider instanceof PrimaryDataStoreProvider) {
+                    try {
+                        HypervisorHostListener hypervisorHostListener = provider.getHostListener();
+
+                        if (hypervisorHostListener != null) {
+                            hypervisorHostListener.hostRemoved(hostId, clusterId);
+                        }
+                    }
+                    catch (Exception ex) {
+                        s_logger.error("hostRemoved(long, long) failed for storage provider " + provider.getName(), ex);
+                    }
+                }
+            }
+        }
+    }
+
     @Override
     public boolean processCommands(long agentId, long seq, Command[] req) {
         return false;
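StoragePoolMonitor above now fans the three host events out to each primary storage provider's HypervisorHostListener, so that interface apparently gains hostAdded, hostAboutToBeRemoved and hostRemoved callbacks. A provider with nothing to do on cluster membership changes could satisfy them with no-ops; a minimal sketch (class name hypothetical, pre-existing HypervisorHostListener callbacks omitted):

    // Hypothetical provider-side listener; only the new host-membership callbacks are shown.
    public class NoOpHostListener implements HypervisorHostListener {
        @Override
        public void hostAdded(long hostId) {
            // nothing to do for this provider
        }

        @Override
        public void hostAboutToBeRemoved(long hostId) {
            // nothing to do for this provider
        }

        @Override
        public void hostRemoved(long hostId, long clusterId) {
            // nothing to do for this provider
        }

        // ... the interface's pre-existing callbacks (e.g. connection handling) are omitted here
    }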
@@ -50,6 +50,10 @@ public class StorageSyncListener implements Listener {
         return true;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) {
     }
@@ -60,6 +64,14 @@ public class StorageSyncListener implements Listener {
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processCommands(long agentId, long seq, Command[] request) {
         return false;
@@ -65,6 +65,10 @@ public class SecondaryStorageListener implements Listener {
         return null;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) {
         if ((cmd instanceof StartupStorageCommand)) {
@@ -91,6 +95,14 @@ public class SecondaryStorageListener implements Listener {
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(long agentId, long seq) {
         return true;
@@ -113,8 +113,6 @@ public class UploadListener implements Listener {
 
     private DataStore sserver;
 
-    private boolean uploadActive = true;
-
     private UploadDao uploadDao;
 
     private final UploadMonitorImpl uploadMonitor;
@@ -250,6 +248,10 @@ public class UploadListener implements Listener {
         return false;
     }
 
+    @Override
+    public void processHostAdded(long hostId) {
+    }
+
     @Override
     public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) {
         if (!(cmd instanceof StartupStorageCommand)) {
@@ -270,7 +272,6 @@ public class UploadListener implements Listener {
     }
 
     public void setUploadInactive(Status reason) {
-        uploadActive = false;
         uploadMonitor.handleUploadEvent(accountId, typeName, type, uploadId, reason, eventId);
     }
 
@@ -294,6 +295,14 @@ public class UploadListener implements Listener {
         return true;
     }
 
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+    }
+
     @Override
     public boolean processTimeout(long agentId, long seq) {
         return true;
710
test/integration/plugins/solidfire/TestAddRemoveHosts.py
Normal file
710
test/integration/plugins/solidfire/TestAddRemoveHosts.py
Normal file
@ -0,0 +1,710 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import random
|
||||||
|
import SignedAPICall
|
||||||
|
import time
|
||||||
|
import XenAPI
|
||||||
|
|
||||||
|
# All tests inherit from cloudstackTestCase
|
||||||
|
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||||
|
|
||||||
|
# Import Integration Libraries
|
||||||
|
|
||||||
|
# base - contains all resources as entities and defines create, delete, list operations on them
|
||||||
|
from marvin.lib.base import Account, ServiceOffering, User, Host, StoragePool, VirtualMachine
|
||||||
|
|
||||||
|
# common - commonly used methods for all tests are listed here
|
||||||
|
from marvin.lib.common import get_domain, get_template, get_zone, list_hosts, list_clusters, list_volumes
|
||||||
|
|
||||||
|
# utils - utility classes for common cleanup, external library wrappers, etc.
|
||||||
|
from marvin.lib.utils import cleanup_resources
|
||||||
|
|
||||||
|
from solidfire import solidfire_element_api as sf_api
|
||||||
|
|
||||||
|
|
||||||
|
class TestData:
|
||||||
|
account = "account"
|
||||||
|
capacityBytes = "capacitybytes"
|
||||||
|
capacityIops = "capacityiops"
|
||||||
|
clusterId = "clusterId"
|
||||||
|
computeOffering = "computeoffering"
|
||||||
|
displayText = "displaytext"
|
||||||
|
diskSize = "disksize"
|
||||||
|
domainId = "domainId"
|
||||||
|
hypervisor = "hypervisor"
|
||||||
|
login = "login"
|
||||||
|
mvip = "mvip"
|
||||||
|
name = "name"
|
||||||
|
newHost = "newHost"
|
||||||
|
newHostDisplayName = "newHostDisplayName"
|
||||||
|
osType = "ostype"
|
||||||
|
password = "password"
|
||||||
|
podId = "podid"
|
||||||
|
port = "port"
|
||||||
|
primaryStorage = "primarystorage"
|
||||||
|
primaryStorage2 = "primarystorage2"
|
||||||
|
provider = "provider"
|
||||||
|
scope = "scope"
|
||||||
|
solidFire = "solidfire"
|
||||||
|
storageTag = "SolidFire_SAN_1"
|
||||||
|
storageTag2 = "SolidFire_Volume_1"
|
||||||
|
tags = "tags"
|
||||||
|
url = "url"
|
||||||
|
urlOfNewHost = "urlOfNewHost"
|
||||||
|
user = "user"
|
||||||
|
username = "username"
|
||||||
|
virtualMachine = "virtualmachine"
|
||||||
|
volume_1 = "volume_1"
|
||||||
|
xenServer = "xenserver"
|
||||||
|
zoneId = "zoneid"
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.testdata = {
|
||||||
|
TestData.solidFire: {
|
||||||
|
TestData.mvip: "192.168.139.112",
|
||||||
|
TestData.login: "admin",
|
||||||
|
TestData.password: "admin",
|
||||||
|
TestData.port: 443,
|
||||||
|
TestData.url: "https://192.168.139.112:443"
|
||||||
|
},
|
||||||
|
TestData.xenServer: {
|
||||||
|
TestData.username: "root",
|
||||||
|
TestData.password: "solidfire"
|
||||||
|
},
|
||||||
|
TestData.urlOfNewHost: "https://192.168.129.243",
|
||||||
|
TestData.account: {
|
||||||
|
"email": "test@test.com",
|
||||||
|
"firstname": "John",
|
||||||
|
"lastname": "Doe",
|
||||||
|
TestData.username: "test",
|
||||||
|
TestData.password: "test"
|
||||||
|
},
|
||||||
|
TestData.user: {
|
||||||
|
"email": "user@test.com",
|
||||||
|
"firstname": "Jane",
|
||||||
|
"lastname": "Doe",
|
||||||
|
TestData.username: "testuser",
|
||||||
|
TestData.password: "password"
|
||||||
|
},
|
||||||
|
TestData.newHost: {
|
||||||
|
TestData.username: "root",
|
||||||
|
TestData.password: "solidfire",
|
||||||
|
TestData.url: "http://192.168.129.243",
|
||||||
|
TestData.podId : "1",
|
||||||
|
TestData.zoneId: "1"
|
||||||
|
},
|
||||||
|
TestData.primaryStorage: {
|
||||||
|
TestData.name: "SolidFire-%d" % random.randint(0, 100),
|
||||||
|
TestData.scope: "ZONE",
|
||||||
|
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||||
|
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||||
|
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||||
|
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||||
|
TestData.provider: "SolidFire",
|
||||||
|
TestData.tags: TestData.storageTag,
|
||||||
|
TestData.capacityIops: 4500000,
|
||||||
|
TestData.capacityBytes: 2251799813685248,
|
||||||
|
TestData.hypervisor: "Any"
|
||||||
|
},
|
||||||
|
TestData.primaryStorage2: {
|
||||||
|
TestData.name: "SolidFireShared-%d" % random.randint(0, 100),
|
||||||
|
TestData.scope: "CLUSTER",
|
||||||
|
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||||
|
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||||
|
"minIops=5000;maxIops=50000;burstIops=75000",
|
||||||
|
TestData.provider: "SolidFireShared",
|
||||||
|
TestData.tags: TestData.storageTag2,
|
||||||
|
TestData.capacityIops: 5000,
|
||||||
|
TestData.capacityBytes: 1099511627776,
|
||||||
|
TestData.hypervisor: "XenServer",
|
||||||
|
TestData.podId: 1
|
||||||
|
},
            TestData.virtualMachine: {
                TestData.name: "TestVM",
                "displayname": "Test VM"
            },
            TestData.computeOffering: {
                TestData.name: "SF_CO_1",
                TestData.displayText: "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
                "storagetype": "shared",
                "customizediops": False,
                "miniops": "10000",
                "maxiops": "15000",
                "hypervisorsnapshotreserve": 200,
                TestData.tags: TestData.storageTag
            },
            TestData.volume_1: {
                "diskname": "testvolume",
            },
            "volume2": {
                "diskname": "testvolume2",
            },
            TestData.newHostDisplayName: "XenServer-6.5-3",
            TestData.osType: "CentOS 5.6(64-bit) no GUI (XenServer)",
            TestData.zoneId: 1,
            TestData.clusterId: 1,
            TestData.domainId: 1,
            TestData.url: "192.168.129.50"
        }


class TestAddRemoveHosts(cloudstackTestCase):
    _vag_id_should_be_non_zero_int_err_msg = "The SolidFire VAG ID should be a non-zero integer."
    _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."

    @classmethod
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestAddRemoveHosts, cls).getClsTestClient()
        cls.apiClient = testclient.getApiClient()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

        cls.xs_pool_master_ip = list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress

        # Set up XenAPI connection
        host_ip = "https://" + cls.xs_pool_master_ip

        cls.xen_session = XenAPI.Session(host_ip)

        xenserver = cls.testdata[TestData.xenServer]

        cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        cls.cluster = list_clusters(cls.apiClient)[0]
        cls.template = get_template(cls.apiClient, cls.zone.id, cls.testdata[TestData.osType])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata[TestData.account],
            admin=1
        )

        # Set up connection to make customized API calls
        user = User.create(
            cls.apiClient,
            cls.testdata[TestData.user],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

        cls.compute_offering = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering]
        )

        cls._cleanup = [
            cls.compute_offering,
            user,
            cls.account
        ]
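        # Only class-level resources are registered for cleanup here; the storage pools
        # created by each test are appended to self.cleanup and the test VM is deleted
        # explicitly in tearDown().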
    @classmethod
    def tearDownClass(cls):
        try:
            cleanup_resources(cls.apiClient, cls._cleanup)

            cls._purge_solidfire_volumes()
        except Exception as e:
            logging.debug("Exception in tearDownClass(cls): %s" % e)

    def setUp(self):
        self.virtual_machine = None

        self.cleanup = []

    def tearDown(self):
        try:
            if self.virtual_machine is not None:
                self.virtual_machine.delete(self.apiClient, True)

            cleanup_resources(self.apiClient, self.cleanup)
        except Exception as e:
            logging.debug("Exception in tearDown(self): %s" % e)

    def test_add_remove_host_with_solidfire_plugin_1(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage)

        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_iscsi_name = self._get_iqn(root_volume)

        self._perform_add_remove_host(primary_storage.id, sf_iscsi_name)

    def test_add_remove_host_with_solidfire_plugin_2(self):
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage_2)

        sf_iscsi_name = self._get_iqn_2(primary_storage_2)

        self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name)

    def test_add_remove_host_with_solidfire_plugin_3(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage)

        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_iscsi_name = self._get_iqn(root_volume)

        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage_2)

        self._perform_add_remove_host(primary_storage.id, sf_iscsi_name)

    def test_add_remove_host_with_solidfire_plugin_4(self):
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage_2)

        sf_iscsi_name = self._get_iqn_2(primary_storage_2)

        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage)

        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name)
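
    # Shared verification flow for the tests above:
    #  1) Confirm every PBD of the SolidFire-backed SR is attached and record the host
    #     IQNs currently in the SolidFire volume access group (VAG).
    #  2) Join a new XenServer host to the pool directly via XenAPI (which adds a PBD
    #     that is not yet attached), then add the same host to the CloudStack cluster
    #     and verify its IQN shows up in the VAG and the new PBD gets attached.
    #  3) Delete the host from CloudStack and eject it from the XenServer pool, then
    #     verify the PBD count and the VAG initiator list return to their original state.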
    def _perform_add_remove_host(self, primary_storage_id, sf_iscsi_name):
        xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)[0]

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self._verify_all_pbds_attached(pbds)

        num_pbds = len(pbds)

        sf_vag_id = self._get_sf_vag_id(self.cluster.id, primary_storage_id)

        host_iscsi_iqns = self._get_host_iscsi_iqns()

        sf_vag = self._get_sf_vag(sf_vag_id)

        sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)

        self._verifyVag(host_iscsi_iqns, sf_vag_initiators)

        sf_vag_initiators_len_orig = len(sf_vag_initiators)

        xen_session = XenAPI.Session(self.testdata[TestData.urlOfNewHost])

        xenserver = self.testdata[TestData.xenServer]

        xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        xen_session.xenapi.pool.join(self.xs_pool_master_ip, xenserver[TestData.username], xenserver[TestData.password])

        time.sleep(60)

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self.assertEqual(
            len(pbds),
            num_pbds + 1,
            "'len(pbds)' is not equal to 'num_pbds + 1'."
        )

        num_pbds = num_pbds + 1

        num_pbds_not_attached = 0

        for pbd in pbds:
            pbd_record = self.xen_session.xenapi.PBD.get_record(pbd)

            if pbd_record["currently_attached"] == False:
                num_pbds_not_attached = num_pbds_not_attached + 1

        self.assertEqual(
            num_pbds_not_attached,
            1,
            "'num_pbds_not_attached' is not equal to 1."
        )

        host = Host.create(
            self.apiClient,
            self.cluster,
            self.testdata[TestData.newHost],
            hypervisor="XenServer"
        )

        self.assertTrue(
            isinstance(host, Host),
            "'host' is not a 'Host'."
        )

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self.assertEqual(
            len(pbds),
            num_pbds,
            "'len(pbds)' is not equal to 'num_pbds'."
        )

        self._verify_all_pbds_attached(pbds)

        host_iscsi_iqns = self._get_host_iscsi_iqns()

        sf_vag = self._get_sf_vag(sf_vag_id)

        sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)

        self._verifyVag(host_iscsi_iqns, sf_vag_initiators)

        sf_vag_initiators_len_new = len(sf_vag_initiators)

        self.assertEqual(
            sf_vag_initiators_len_new,
            sf_vag_initiators_len_orig + 1,
            "'sf_vag_initiators_len_new' != 'sf_vag_initiators_len_orig + 1'"
        )

        host.delete(self.apiClient)

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self.assertEqual(
            len(pbds),
            num_pbds,
            "'len(pbds)' is not equal to 'num_pbds'."
        )

        self._verify_all_pbds_attached(pbds)

        host_iscsi_iqns = self._get_host_iscsi_iqns()

        sf_vag = self._get_sf_vag(sf_vag_id)

        sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)

        self.assertEqual(
            len(host_iscsi_iqns) - 1,
            len(sf_vag_initiators),
            "'len(host_iscsi_iqns) - 1' is not equal to 'len(sf_vag_initiators)'."
        )

        host_ref = self.xen_session.xenapi.host.get_by_name_label(self.testdata[TestData.newHostDisplayName])[0]

        self.xen_session.xenapi.pool.eject(host_ref)

        time.sleep(120)

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self.assertEqual(
            len(pbds),
            num_pbds - 1,
            "'len(pbds)' is not equal to 'num_pbds - 1'."
        )

        self._verify_all_pbds_attached(pbds)

        host_iscsi_iqns = self._get_host_iscsi_iqns()

        sf_vag = self._get_sf_vag(sf_vag_id)

        sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)

        self._verifyVag(host_iscsi_iqns, sf_vag_initiators)

        sf_vag_initiators_len_new = len(sf_vag_initiators)

        self.assertEqual(
            sf_vag_initiators_len_new,
            sf_vag_initiators_len_orig,
            "'sf_vag_initiators_len_new' != 'sf_vag_initiators_len_orig'"
        )

    def _verify_all_pbds_attached(self, pbds):
        for pbd in pbds:
            pbd_record = self.xen_session.xenapi.PBD.get_record(pbd)

            self.assertEqual(
                pbd_record["currently_attached"],
                True,
                "Not all PBDs are currently attached."
            )

    def _get_root_volume(self, vm):
        list_volumes_response = list_volumes(
            self.apiClient,
            virtualmachineid=vm.id,
            listall=True
        )

        self.assertNotEqual(
            list_volumes_response,
            None,
            "'list_volumes_response' should not be equal to 'None'."
        )

        self.assertEqual(
            len(list_volumes_response) > 0,
            True,
            "'len(list_volumes_response)' should be greater than 0."
        )

        for volume in list_volumes_response:
            if volume.type.upper() == "ROOT":
                return volume

        self.assert_(False, "Unable to locate the ROOT volume of the VM with the following ID: " + str(vm.id))

    def _get_iqn(self, volume):
        # Get volume IQN
        sf_iscsi_name_request = {'volumeid': volume.id}
        # put this commented line back once PR 1403 is in
        # sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(sf_iscsi_name_request)
        sf_iscsi_name_result = self.cs_api.getSolidFireVolumeIscsiName(sf_iscsi_name_request)
        # sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
        sf_iscsi_name = sf_iscsi_name_result['apisolidfirevolumeiscsiname']['solidFireVolumeIscsiName']

        self._check_iscsi_name(sf_iscsi_name)

        return sf_iscsi_name
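
    # For the cluster-scoped (shared) pool, the SR name label is read straight from the
    # 'path' column of the 'storage_pool' table through Marvin's DB connection (see the
    # note below about remote MySQL access), rather than via a per-volume IQN lookup.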
    def _get_iqn_2(self, primary_storage):
        sql_query = "Select path From storage_pool Where uuid = '" + str(primary_storage.id) + "'"

        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
        sql_result = self.dbConnection.execute(sql_query)

        return sql_result[0][0]

    def _check_iscsi_name(self, sf_iscsi_name):
        self.assertEqual(
            sf_iscsi_name[0],
            "/",
            "The iSCSI name needs to start with a forward slash."
        )

    def _get_host_iscsi_iqns(self):
        hosts = self.xen_session.xenapi.host.get_all()

        self.assertEqual(
            isinstance(hosts, list),
            True,
            "'hosts' is not a list."
        )

        host_iscsi_iqns = []

        for host in hosts:
            host_iscsi_iqns.append(self._get_host_iscsi_iqn(host))

        return host_iscsi_iqns

    def _get_host_iscsi_iqn(self, host):
        other_config = self.xen_session.xenapi.host.get_other_config(host)

        return other_config["iscsi_iqn"]
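
    # The SolidFire volume access group (VAG) ID comes back from the plug-in's custom
    # API via the signed cs_api client and is expected to be a non-zero integer.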
    def _get_sf_vag_id(self, cluster_id, primary_storage_id):
        # Get SF Volume Access Group ID
        sf_vag_id_request = {'clusterid': cluster_id, 'storageid': primary_storage_id}
        sf_vag_id_result = self.cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request)
        sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId']

        self.assertEqual(
            isinstance(sf_vag_id, int),
            True,
            TestAddRemoveHosts._vag_id_should_be_non_zero_int_err_msg
        )

        return sf_vag_id

    def _get_sf_vag(self, sf_vag_id):
        return self.sf_client.list_volume_access_groups(sf_vag_id, 1)["volumeAccessGroups"][0]

    def _get_sf_vag_initiators(self, sf_vag):
        return sf_vag["initiators"]

    def _verifyVag(self, host_iscsi_iqns, sf_vag_initiators):
        self.assertEqual(
            isinstance(host_iscsi_iqns, list),
            True,
            "'host_iscsi_iqns' is not a list."
        )

        self.assertEqual(
            isinstance(sf_vag_initiators, list),
            True,
            "'sf_vag_initiators' is not a list."
        )

        self.assertEqual(
            len(host_iscsi_iqns),
            len(sf_vag_initiators),
            "Lists are not the same size."
        )

        for host_iscsi_iqn in host_iscsi_iqns:
            # an error should occur if host_iscsi_iqn is not in sf_vag_initiators
            sf_vag_initiators.index(host_iscsi_iqn)

    def _check_list(self, in_list, expected_size_of_list, err_msg):
        self.assertEqual(
            isinstance(in_list, list),
            True,
            "'in_list' is not a list."
        )

        self.assertEqual(
            len(in_list),
            expected_size_of_list,
            err_msg
        )

    @classmethod
    def _purge_solidfire_volumes(cls):
        deleted_volumes = cls.sf_client.list_deleted_volumes()

        for deleted_volume in deleted_volumes:
            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])

1472  test/integration/plugins/solidfire/TestSnapshots.py  Normal file
File diff suppressed because it is too large. Load Diff

862  test/integration/plugins/solidfire/TestVMSnapshots.py  Normal file
@@ -0,0 +1,862 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import logging
import random
import SignedAPICall
import XenAPI

# All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase

# Import Integration Libraries

# base - contains all resources as entities and defines create, delete, list operations on them
from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, VmSnapshot, Volume

# common - commonly used methods for all tests are listed here
from marvin.lib.common import get_domain, get_template, get_zone, list_hosts, list_volumes

# utils - utility classes for common cleanup, external library wrappers, etc.
from marvin.lib.utils import cleanup_resources

from solidfire import solidfire_element_api as sf_api

# on April 15, 2016: Ran 2 tests in 800.299s with three hosts
# on May 2, 2016: Ran 2 tests in 789.729s with two hosts


class TestData:
    account = "account"
    capacityBytes = "capacitybytes"
    capacityIops = "capacityiops"
    clusterId = "clusterId"
    computeOffering = "computeoffering"
    diskOffering = "diskoffering"
    domainId = "domainId"
    hypervisor = "hypervisor"
    login = "login"
    mvip = "mvip"
    password = "password"
    port = "port"
    primaryStorage = "primarystorage"
    provider = "provider"
    scope = "scope"
    solidFire = "solidfire"
    storageTag = "SolidFire_SAN_1"
    tags = "tags"
    templateName = "templatename"
    url = "url"
    user = "user"
    username = "username"
    virtualMachine = "virtualmachine"
    volume_1 = "volume_1"
    xenServer = "xenserver"
    zoneId = "zoneId"

    def __init__(self):
        self.testdata = {
            TestData.solidFire: {
                TestData.mvip: "192.168.139.112",
                TestData.login: "admin",
                TestData.password: "admin",
                TestData.port: 443,
                TestData.url: "https://192.168.139.112:443"
            },
            TestData.xenServer: {
                TestData.username: "root",
                TestData.password: "solidfire"
            },
            TestData.account: {
                "email": "test@test.com",
                "firstname": "John",
                "lastname": "Doe",
                TestData.username: "test",
                TestData.password: "test"
            },
            TestData.user: {
                "email": "user@test.com",
                "firstname": "Jane",
                "lastname": "Doe",
                TestData.username: "testuser",
                TestData.password: "password"
            },
            TestData.primaryStorage: {
                "name": "SolidFire-%d" % random.randint(0, 100),
                TestData.scope: "ZONE",
                "url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
                    "clusterAdminUsername=admin;clusterAdminPassword=admin;" +
                    "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
                    "clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
                TestData.provider: "SolidFire",
                TestData.tags: TestData.storageTag,
                TestData.capacityIops: 4500000,
                TestData.capacityBytes: 2251799813685248,
                TestData.hypervisor: "Any"
            },
            TestData.virtualMachine: {
                "name": "TestVM",
                "displayname": "Test VM"
            },
            TestData.computeOffering: {
                "name": "SF_CO_1",
                "displaytext": "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
                "storagetype": "shared",
                "customizediops": False,
                "miniops": "10000",
                "maxiops": "15000",
                "hypervisorsnapshotreserve": 200,
                TestData.tags: TestData.storageTag
            },
            TestData.diskOffering: {
                "name": "SF_DO_1",
                "displaytext": "SF_DO_1 (Min IOPS = 300; Max IOPS = 500)",
                "disksize": 128,
                "customizediops": False,
                "miniops": 300,
                "maxiops": 500,
                "hypervisorsnapshotreserve": 200,
                TestData.tags: TestData.storageTag,
                "storagetype": "shared"
            },
            "testdiskofferings": {
                "customiopsdo": {
                    "name": "SF_Custom_IOPS_DO",
                    "displaytext": "Customized IOPS DO (Size = 128 GB; Min IOPS = 500; Max IOPS = 1000)",
                    "disksize": 128,
                    "customizediops": True,
                    "miniops": 500,
                    "maxiops": 1000,
                    "hypervisorsnapshotreserve": 200,
                    TestData.tags: TestData.storageTag,
                    "storagetype": "shared"
                },
                "customsizedo": {
                    "name": "SF_Custom_Size_DO",
                    "displaytext": "Customized IOPS DO (Min IOPS = 500; Max IOPS = 1000)",
                    "disksize": 175,
                    "customizediops": False,
                    "miniops": 500,
                    "maxiops": 1000,
                    "hypervisorsnapshotreserve": 200,
                    TestData.tags: TestData.storageTag,
                    "storagetype": "shared"
                },
                "customsizeandiopsdo": {
                    "name": "SF_Custom_Size_IOPS_DO",
                    "displaytext": "Customized Size and IOPS DO",
                    "disksize": 200,
                    "customizediops": True,
                    "miniops": 400,
                    "maxiops": 800,
                    "hypervisorsnapshotreserve": 200,
                    TestData.tags: TestData.storageTag,
                    "storagetype": "shared"
                },
                "newiopsdo": {
                    "name": "SF_New_IOPS_DO",
                    "displaytext": "New IOPS (Size = 128 GB; Min IOPS = 350, Max IOPS = 700)",
                    "disksize": 128,
                    "miniops": 350,
                    "maxiops": 700,
                    "hypervisorsnapshotreserve": 200,
                    TestData.tags: TestData.storageTag,
                    "storagetype": "shared"
                },
                "newsizedo": {
                    "name": "SF_New_Size_DO",
                    "displaytext": "New Size: 175",
                    "disksize": 175,
                    "miniops": 400,
                    "maxiops": 800,
                    "hypervisorsnapshotreserve": 200,
                    TestData.tags: TestData.storageTag,
                    "storagetype": "shared"
                },
                "newsizeandiopsdo": {
                    "name": "SF_New_Size_IOPS_DO",
                    "displaytext": "New Size and IOPS",
                    "disksize": 200,
                    "miniops": 200,
                    "maxiops": 400,
                    "hypervisorsnapshotreserve": 200,
                    TestData.tags: TestData.storageTag,
                    "storagetype": "shared"
                }
            },
            TestData.volume_1: {
                "diskname": "testvolume",
            },
            "volume2": {
                "diskname": "testvolume2",
            },
            TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)",
            TestData.zoneId: 1,
            TestData.clusterId: 1,
            TestData.domainId: 1,
            TestData.url: "192.168.129.50"
        }


class TestVMSnapshots(cloudstackTestCase):
    _should_be_no_vm_snapshots_err_msg = "There should be no VM snapshots."
    _should_only_be_one_vm_snapshot_err_msg = "There should only be one VM snapshot."
    _should_only_be_one_root_volume_err_msg = "There should only be one root volume."
    _path_should_have_changed_err_msg = "The 'path' in the 'DB' should have changed."
    _path_should_not_have_changed_err_msg = "The 'path' in the 'DB' should not have changed."
    _should_only_be_one_vdi_err_msg = "There should only be one VDI."
    _should_be_three_vdis_err_msg = "There should be three VDIs."
    _active_vdis_should_not_be_the_same_err_msg = "The active VDIs should not be the same."
    _active_vdis_should_be_the_same_err_msg = "The active VDIs should be the same."
    _snapshot_vdis_should_be_the_same_err_msg = "The snapshot VDIs should be the same."
    _base_vdis_should_be_the_same_err_msg = "The base VDIs should be the same."
    _snapshot_parent_not_correct_err_msg = "Snapshot's parent is not correct."

    @classmethod
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestVMSnapshots, cls).getClsTestClient()
        cls.apiClient = testclient.getApiClient()

        cls.testdata = TestData().testdata

        # Set up XenAPI connection
        host_ip = "https://" + \
            list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress

        cls.xen_session = XenAPI.Session(host_ip)

        xenserver = cls.testdata[TestData.xenServer]

        cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata[TestData.account],
            admin=1
        )

        # Set up connection to make customized API calls
        user = User.create(
            cls.apiClient,
            cls.testdata[TestData.user],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        compute_offering = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering]
        )

        cls.disk_offering = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering]
        )

        # Create VM and volume for tests
        cls.virtual_machine = VirtualMachine.create(
            cls.apiClient,
            cls.testdata[TestData.virtualMachine],
            accountid=cls.account.name,
            zoneid=cls.zone.id,
            serviceofferingid=compute_offering.id,
            templateid=template.id,
            domainid=cls.domain.id,
            startvm=True
        )

        cls._cleanup = [
            cls.virtual_machine,
            compute_offering,
            cls.disk_offering,
            user,
            cls.account
        ]

    @classmethod
    def tearDownClass(cls):
        try:
            cleanup_resources(cls.apiClient, cls._cleanup)

            cls.primary_storage.delete(cls.apiClient)

            cls._purge_solidfire_volumes()
        except Exception as e:
            logging.debug("Exception in tearDownClass(cls): %s" % e)

    def setUp(self):
        self.cleanup = []

    def tearDown(self):
        try:
            cleanup_resources(self.apiClient, self.cleanup)
        except Exception as e:
            logging.debug("Exception in tearDown(self): %s" % e)
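
    # Take a VM snapshot of a running VM and verify the SolidFire-backed SR then holds
    # three VDIs (active, snapshot and "base copy"); revert to the snapshot and verify
    # the active VDI and the volume 'path' in the DB changed while the snapshot and base
    # VDIs did not; finally delete the VM snapshot and verify only the active VDI remains.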
    def test_01_take_VM_snapshot(self):
        self.virtual_machine.start(self.apiClient)

        root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")

        self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg)

        root_volume = root_volumes[0]

        volume_id = {'volumeid': root_volume.id}

        sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(volume_id)
        sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']

        self._check_iscsi_name(sf_iscsi_name)

        root_volume_path_1 = self._get_path(volume_id)

        #######################################
        #######################################
        # STEP 1: Take snapshot of running VM #
        #######################################
        #######################################
        vm_snapshot = VmSnapshot.create(
            self.apiClient,
            vmid=self.virtual_machine.id,
            snapshotmemory="false",
            name="Test Snapshot",
            description="Test Snapshot Desc"
        )

        list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")

        self._verify_vm_snapshot(list_vm_snapshots, vm_snapshot)

        root_volume_path_2 = self._get_path(volume_id)

        self.assertEqual(
            root_volume_path_1,
            root_volume_path_2,
            TestVMSnapshots._path_should_not_have_changed_err_msg
        )

        xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)[0]

        xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)

        self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)

        vdis_after_create = self._get_vdis(xen_vdis)

        vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(vdis_after_create.snapshot_vdi["snapshot_of"])

        self.assertEqual(
            vdiSnapshotOf["uuid"],
            vdis_after_create.active_vdi["uuid"],
            TestVMSnapshots._snapshot_parent_not_correct_err_msg
        )

        #######################################
        #######################################
        ###  STEP 2: Revert VM to Snapshot  ###
        #######################################
        #######################################
        self.virtual_machine.stop(self.apiClient)

        VmSnapshot.revertToSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id)

        list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")

        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)

        root_volume_path_3 = self._get_path(volume_id)

        self.assertNotEqual(
            root_volume_path_1,
            root_volume_path_3,
            TestVMSnapshots._path_should_have_changed_err_msg
        )

        xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)

        self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)

        vdis_after_revert = self._get_vdis(xen_vdis)

        self.assertNotEqual(
            vdis_after_create.active_vdi["uuid"],
            vdis_after_revert.active_vdi["uuid"],
            TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg
        )

        self.assertEqual(
            vdis_after_create.snapshot_vdi["uuid"],
            vdis_after_revert.snapshot_vdi["uuid"],
            TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg
        )

        self.assertEqual(
            vdis_after_create.base_vdi["uuid"],
            vdis_after_revert.base_vdi["uuid"],
            TestVMSnapshots._base_vdis_should_be_the_same_err_msg
        )

        #######################################
        #######################################
        #####  STEP 3: Delete VM snapshot #####
        #######################################
        #######################################
        VmSnapshot.deleteVMSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id)

        list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")

        self.assertEqual(
            list_vm_snapshots,
            None,
            TestVMSnapshots._should_be_no_vm_snapshots_err_msg
        )

        root_volume_path_4 = self._get_path(volume_id)

        self.assertEqual(
            root_volume_path_3,
            root_volume_path_4,
            TestVMSnapshots._path_should_not_have_changed_err_msg
        )

        xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)

        self._check_list(xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)

        vdis_after_delete = self._get_vdis(xen_vdis, True)

        self.assertEqual(
            vdis_after_revert.active_vdi["uuid"],
            vdis_after_delete.active_vdi["uuid"],
            TestVMSnapshots._active_vdis_should_be_the_same_err_msg
        )

        #######################################
        #######################################
        #####        STEP 4: Start VM     #####
        #######################################
        #######################################
        self.virtual_machine.start(self.apiClient)
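
    # Same flow as test_01, but with a data disk attached, so every path/VDI check is
    # performed for both the ROOT volume and the DATADISK volume.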
    def test_02_take_VM_snapshot_with_data_disk(self):
        self.virtual_machine.start(self.apiClient)

        data_volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_1],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering.id
        )

        self.cleanup = [data_volume]

        self.virtual_machine.attach_volume(self.apiClient, data_volume)

        root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")

        self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg)

        root_volume = root_volumes[0]

        root_volume_id = {'volumeid': root_volume.id}

        sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(root_volume_id)
        sf_iscsi_root_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']

        self._check_iscsi_name(sf_iscsi_root_volume_name)

        root_volume_path_1 = self._get_path(root_volume_id)

        data_volumes = list_volumes(self.apiClient, type="DATADISK", listAll="true")

        self._check_list(data_volumes, 1, "There should only be one data volume.")

        data_volume = data_volumes[0]

        data_volume_id = {'volumeid': data_volume.id}

        sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(data_volume_id)
        sf_iscsi_data_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']

        self._check_iscsi_name(sf_iscsi_data_volume_name)

        data_volume_path_1 = self._get_path(data_volume_id)

        #######################################
        #######################################
        # STEP 1: Take snapshot of running VM #
        #######################################
        #######################################
        vm_snapshot = VmSnapshot.create(
            self.apiClient,
            vmid=self.virtual_machine.id,
            snapshotmemory="false",
            name="Test Snapshot",
            description="Test Snapshot Desc"
        )

        list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")

        self._verify_vm_snapshot(list_vm_snapshots, vm_snapshot)

        root_volume_path_2 = self._get_path(root_volume_id)

        self.assertEqual(
            root_volume_path_1,
            root_volume_path_2,
            TestVMSnapshots._path_should_not_have_changed_err_msg
        )

        data_volume_path_2 = self._get_path(data_volume_id)

        self.assertEqual(
            data_volume_path_1,
            data_volume_path_2,
            TestVMSnapshots._path_should_not_have_changed_err_msg
        )

        root_volume_xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_root_volume_name)[0]

        root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)

        self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)

        root_volume_vdis_after_create = self._get_vdis(root_volume_xen_vdis)

        vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(root_volume_vdis_after_create.snapshot_vdi["snapshot_of"])

        self.assertEqual(
            vdiSnapshotOf["uuid"],
            root_volume_vdis_after_create.active_vdi["uuid"],
            TestVMSnapshots._snapshot_parent_not_correct_err_msg
        )

        data_volume_xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_data_volume_name)[0]

        data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)

        self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)

        data_volume_vdis_after_create = self._get_vdis(data_volume_xen_vdis)

        vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(data_volume_vdis_after_create.snapshot_vdi["snapshot_of"])

        self.assertEqual(
            vdiSnapshotOf["uuid"],
            data_volume_vdis_after_create.active_vdi["uuid"],
            TestVMSnapshots._snapshot_parent_not_correct_err_msg
        )

        #######################################
        #######################################
        ###  STEP 2: Revert VM to Snapshot  ###
        #######################################
        #######################################
        self.virtual_machine.stop(self.apiClient)

        VmSnapshot.revertToSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id)

        list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")

        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)

        root_volume_path_3 = self._get_path(root_volume_id)

        self.assertNotEqual(
            root_volume_path_1,
            root_volume_path_3,
            TestVMSnapshots._path_should_have_changed_err_msg
        )

        root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)

        self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)

        root_volume_vdis_after_revert = self._get_vdis(root_volume_xen_vdis)

        self.assertNotEqual(
            root_volume_vdis_after_create.active_vdi["uuid"],
            root_volume_vdis_after_revert.active_vdi["uuid"],
            TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg
        )

        self.assertEqual(
            root_volume_vdis_after_create.snapshot_vdi["uuid"],
            root_volume_vdis_after_revert.snapshot_vdi["uuid"],
            TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg
        )

        self.assertEqual(
            root_volume_vdis_after_create.base_vdi["uuid"],
            root_volume_vdis_after_revert.base_vdi["uuid"],
            TestVMSnapshots._base_vdis_should_be_the_same_err_msg
        )

        data_volume_path_3 = self._get_path(data_volume_id)

        self.assertNotEqual(
            data_volume_path_1,
            data_volume_path_3,
            TestVMSnapshots._path_should_have_changed_err_msg
        )

        data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)

        self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)

        data_volume_vdis_after_revert = self._get_vdis(data_volume_xen_vdis)

        self.assertNotEqual(
            data_volume_vdis_after_create.active_vdi["uuid"],
            data_volume_vdis_after_revert.active_vdi["uuid"],
            TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg
        )

        self.assertEqual(
            data_volume_vdis_after_create.snapshot_vdi["uuid"],
            data_volume_vdis_after_revert.snapshot_vdi["uuid"],
            TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg
        )

        self.assertEqual(
            data_volume_vdis_after_create.base_vdi["uuid"],
            data_volume_vdis_after_revert.base_vdi["uuid"],
            TestVMSnapshots._base_vdis_should_be_the_same_err_msg
        )

        #######################################
        #######################################
        #####  STEP 3: Delete VM snapshot #####
        #######################################
        #######################################
        VmSnapshot.deleteVMSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id)

        list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")

        self.assertEqual(
            list_vm_snapshots,
            None,
            TestVMSnapshots._should_be_no_vm_snapshots_err_msg
        )

        root_volume_path_4 = self._get_path(root_volume_id)

        self.assertEqual(
            root_volume_path_3,
            root_volume_path_4,
            TestVMSnapshots._path_should_not_have_changed_err_msg
        )

        root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)

        self._check_list(root_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)

        root_volume_vdis_after_delete = self._get_vdis(root_volume_xen_vdis, True)

        self.assertEqual(
            root_volume_vdis_after_revert.active_vdi["uuid"],
            root_volume_vdis_after_delete.active_vdi["uuid"],
            TestVMSnapshots._active_vdis_should_be_the_same_err_msg
        )

        data_volume_path_4 = self._get_path(data_volume_id)

        self.assertEqual(
            data_volume_path_3,
            data_volume_path_4,
            TestVMSnapshots._path_should_not_have_changed_err_msg
        )

        data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)

        self._check_list(data_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)

        data_volume_vdis_after_delete = self._get_vdis(data_volume_xen_vdis, True)

        self.assertEqual(
            data_volume_vdis_after_revert.active_vdi["uuid"],
            data_volume_vdis_after_delete.active_vdi["uuid"],
            TestVMSnapshots._active_vdis_should_be_the_same_err_msg
        )

        #######################################
        #######################################
        #####        STEP 4: Start VM     #####
        #######################################
        #######################################
        self.virtual_machine.detach_volume(self.apiClient, data_volume)

        self.virtual_machine.start(self.apiClient)
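
    # 'getPathForVolume' appears to be one of the plug-in's custom API calls (invoked
    # through the signed cs_api client); the tests use the returned path to detect
    # whether a revert changed the volume's 'path' in the DB.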
    def _get_path(self, volume_id):
        path_result = self.cs_api.getPathForVolume(volume_id)

        return path_result['apipathforvolume']['path']

    def _verify_vm_snapshot(self, list_vm_snapshots, vm_snapshot):
        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)

        vm_snapshot_from_list = list_vm_snapshots[0]

        self.assertEqual(
            vm_snapshot.id,
            vm_snapshot_from_list.id,
            "There is a problem with the VM snapshot ID."
        )

        self.assertEqual(
            vm_snapshot.virtualmachineid,
            self.virtual_machine.id,
            "The ID of the snapshot's virtual machine does not match the expected virtual machine."
        )

        self.assertEqual(
            vm_snapshot.state,
            "Ready",
            "The snapshot is not in the 'Ready' state."
        )

    def _check_iscsi_name(self, sf_iscsi_name):
        self.assertEqual(
            sf_iscsi_name[0],
            "/",
            "The iSCSI name needs to start with a forward slash."
        )

    def _check_list(self, in_list, expected_size_of_list, err_msg):
        self.assertEqual(
            isinstance(in_list, list),
            True,
            "'in_list' is not a list."
        )

        self.assertEqual(
            len(in_list),
            expected_size_of_list,
            err_msg
        )
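
    # Classify the VDIs on the SR: the VDI named "base copy" is the base VDI, a VDI
    # flagged 'is_a_snapshot' is the snapshot VDI, and the remaining one is the active
    # VDI. When only_active_expected is True, only the active VDI may be present.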
    def _get_vdis(self, xen_vdis, only_active_expected=False):
        expected_number_of_vdis = 1 if only_active_expected else 3

        self.assertEqual(
            len(xen_vdis),
            expected_number_of_vdis,
            "The list had an unexpected number of items in it."
        )

        active_vdi = None
        snapshot_vdi = None
        base_vdi = None

        for temp_vdi in xen_vdis:
            temp = self.xen_session.xenapi.VDI.get_record(temp_vdi)

            if temp["name_label"] == "base copy":
                base_vdi = temp
            else:
                if temp["is_a_snapshot"] == True:
                    snapshot_vdi = temp
                else:
                    active_vdi = temp

        self.assertNotEqual(
            active_vdi,
            None,
            "The active VDI could not be located."
        )

        if only_active_expected:
            self.assertEqual(
                snapshot_vdi,
                None,
                "The snapshot VDI should not be present."
            )

            self.assertEqual(
                base_vdi,
                None,
                "The base VDI should not be present."
            )
        else:
            self.assertNotEqual(
                snapshot_vdi,
                None,
                "The snapshot VDI could not be located."
            )

            self.assertNotEqual(
                base_vdi,
                None,
                "The base VDI could not be located."
            )

        class VdiCollection(object):
            pass

        vdis = VdiCollection()

        vdis.active_vdi = active_vdi
        vdis.snapshot_vdi = snapshot_vdi
        vdis.base_vdi = base_vdi

        return vdis

    @classmethod
    def _purge_solidfire_volumes(cls):
        deleted_volumes = cls.sf_client.list_deleted_volumes()

        for deleted_volume in deleted_volumes:
            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])

1676  test/integration/plugins/solidfire/TestVolumes.py  Normal file
File diff suppressed because it is too large. Load Diff

@@ -16336,8 +16336,11 @@
     }
 });
-if (args.context.hosts[0].hypervisor == "XenServer"){
-    cloudStack.dialog.notice({ message: _s("The host has been deleted. Please eject the host from XenServer Pool") })
+if (args.context.hosts[0].hypervisor == "XenServer") {
+    cloudStack.dialog.notice({ message: _s("The host has been removed. Please eject the host from the XenServer Resource Pool.") })
+}
+else if (args.context.hosts[0].hypervisor == "VMware") {
+    cloudStack.dialog.notice({ message: _s("The host has been removed. Please eject the host from the vSphere Cluster.") })
 }
 }
 }
 });