From dad9e5d86835ccdaa39a3c62047b7fce3a32de6d Mon Sep 17 00:00:00 2001 From: Mike Tutkowski Date: Thu, 13 Aug 2015 18:44:12 -0600 Subject: [PATCH] CLOUDSTACK-8813: Notify listeners when a host has been added to a cluster, is about to be removed from a cluster, or has been removed from a cluster --- .../agent/api/CreateStoragePoolCommand.java | 7 +- .../agent/api/ModifyStoragePoolAnswer.java | 37 +- .../agent/api/ModifyStoragePoolCommand.java | 50 +- .../cloud/agent/api/ModifyTargetsAnswer.java | 23 + .../cloud/agent/api/ModifyTargetsCommand.java | 57 + .../api/storage/DataStoreProviderManager.java | 4 + .../api/storage/HypervisorHostListener.java | 6 + .../api/storage/PrimaryDataStoreDriver.java | 16 +- .../src/com/cloud/agent/AgentManager.java | 10 +- .../src/com/cloud/agent/Listener.java | 18 + .../com/cloud/agent/manager/AgentAttache.java | 8 +- .../cloud/agent/manager/AgentManagerImpl.java | 53 +- .../agent/manager/SynchronousListener.java | 12 + .../cloud/vm/VirtualMachineManagerImpl.java | 12 + .../orchestration/NetworkOrchestrator.java | 11 + .../datastore/db/PrimaryDataStoreDao.java | 2 + .../datastore/db/PrimaryDataStoreDaoImpl.java | 8 + .../test/DirectAgentManagerSimpleImpl.java | 9 +- .../storage/RemoteHostEndPoint.java | 12 + .../DataStoreProviderManagerImpl.java | 1 + .../provider/DefaultHostListener.java | 14 + .../discoverer/HypervServerDiscoverer.java | 12 + .../ovm3/resources/Ovm3Discoverer.java | 12 + .../cloud/resource/SimulatorDiscoverer.java | 12 + .../SimulatorSecondaryDiscoverer.java | 12 + .../vmware/manager/VmwareManagerImpl.java | 12 + .../vmware/resource/VmwareResource.java | 51 +- .../resource/VmwareStorageProcessor.java | 78 +- .../discoverer/XcpServerDiscoverer.java | 17 +- ...CitrixModifyStoragePoolCommandWrapper.java | 5 +- .../network/manager/NuageVspManagerImpl.java | 15 + .../provider/ElastistorHostListener.java | 14 + .../provider/NexentaHostListener.java | 31 +- .../SolidFirePrimaryDataStoreDriver.java | 27 +- .../SolidFirePrimaryDataStoreLifeCycle.java | 6 +- ...idFireSharedPrimaryDataStoreLifeCycle.java | 62 +- .../provider/SolidFireHostListener.java | 244 ++- .../provider/SolidFireSharedHostListener.java | 187 +- .../storage/datastore/util/SolidFireUtil.java | 129 +- .../cloud/capacity/CapacityManagerImpl.java | 12 + .../capacity/ComputeCapacityListener.java | 12 + .../capacity/StorageCapacityListener.java | 12 + .../consoleproxy/ConsoleProxyListener.java | 12 + .../deploy/DeploymentPlanningManagerImpl.java | 12 + .../discoverer/LibvirtServerDiscoverer.java | 12 + .../network/NetworkUsageManagerImpl.java | 12 + .../cloud/network/SshKeysDistriMonitor.java | 16 +- .../VirtualNetworkApplianceManagerImpl.java | 227 +-- .../security/SecurityGroupListener.java | 16 +- .../cloud/resource/ResourceManagerImpl.java | 59 +- .../storage/ImageStoreUploadMonitorImpl.java | 12 + .../storage/LocalStoragePoolListener.java | 30 +- .../com/cloud/storage/StorageManagerImpl.java | 2 +- .../storage/download/DownloadListener.java | 18 +- .../storage/listener/StoragePoolMonitor.java | 83 +- .../storage/listener/StorageSyncListener.java | 12 + .../secondary/SecondaryStorageListener.java | 12 + .../cloud/storage/upload/UploadListener.java | 15 +- .../plugins/solidfire/TestAddRemoveHosts.py | 710 +++++++ .../plugins/solidfire/TestSnapshots.py | 1472 +++++++++++++++ .../plugins/solidfire/TestVMSnapshots.py | 862 +++++++++ .../plugins/solidfire/TestVolumes.py | 1676 +++++++++++++++++ ui/scripts/system.js | 7 +- 63 files changed, 6222 insertions(+), 387 deletions(-) create mode 
100644 core/src/com/cloud/agent/api/ModifyTargetsAnswer.java create mode 100644 core/src/com/cloud/agent/api/ModifyTargetsCommand.java create mode 100644 test/integration/plugins/solidfire/TestAddRemoveHosts.py create mode 100644 test/integration/plugins/solidfire/TestSnapshots.py create mode 100644 test/integration/plugins/solidfire/TestVMSnapshots.py create mode 100644 test/integration/plugins/solidfire/TestVolumes.py diff --git a/core/src/com/cloud/agent/api/CreateStoragePoolCommand.java b/core/src/com/cloud/agent/api/CreateStoragePoolCommand.java index 90a5e397144..52746fefc5d 100644 --- a/core/src/com/cloud/agent/api/CreateStoragePoolCommand.java +++ b/core/src/com/cloud/agent/api/CreateStoragePoolCommand.java @@ -19,10 +19,10 @@ package com.cloud.agent.api; -import com.cloud.storage.StoragePool; - import java.util.Map; +import com.cloud.storage.StoragePool; + public class CreateStoragePoolCommand extends ModifyStoragePoolCommand { public static final String DATASTORE_NAME = "datastoreName"; public static final String IQN = "iqn"; @@ -32,9 +32,6 @@ public class CreateStoragePoolCommand extends ModifyStoragePoolCommand { private boolean _createDatastore; private Map<String, String> _details; - public CreateStoragePoolCommand() { - } - public CreateStoragePoolCommand(boolean add, StoragePool pool) { super(add, pool); } diff --git a/core/src/com/cloud/agent/api/ModifyStoragePoolAnswer.java b/core/src/com/cloud/agent/api/ModifyStoragePoolAnswer.java index b92bb720823..0e42d9a6d11 100644 --- a/core/src/com/cloud/agent/api/ModifyStoragePoolAnswer.java +++ b/core/src/com/cloud/agent/api/ModifyStoragePoolAnswer.java @@ -24,44 +24,41 @@ import java.util.Map; import com.cloud.storage.template.TemplateProp; public class ModifyStoragePoolAnswer extends Answer { - StoragePoolInfo poolInfo; - Map<String, TemplateProp> templateInfo; - String localDatastoreName = null; - - protected ModifyStoragePoolAnswer() { - } + private StoragePoolInfo _poolInfo; + private Map<String, TemplateProp> _templateInfo; + private String _localDatastoreName; public ModifyStoragePoolAnswer(ModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map<String, TemplateProp> tInfo) { super(cmd); - this.result = true; - this.poolInfo = - new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes); - this.templateInfo = tInfo; - } + result = true; - public StoragePoolInfo getPoolInfo() { - return poolInfo; + _poolInfo = new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes); + + _templateInfo = tInfo; } public void setPoolInfo(StoragePoolInfo poolInfo) { - this.poolInfo = poolInfo; + _poolInfo = poolInfo; } - public Map<String, TemplateProp> getTemplateInfo() { - return templateInfo; + public StoragePoolInfo getPoolInfo() { + return _poolInfo; } public void setTemplateInfo(Map<String, TemplateProp> templateInfo) { - this.templateInfo = templateInfo; + _templateInfo = templateInfo; } - public String getLocalDatastoreName() { - return localDatastoreName; + public Map<String, TemplateProp> getTemplateInfo() { + return _templateInfo; } public void setLocalDatastoreName(String localDatastoreName) { - this.localDatastoreName = localDatastoreName; + _localDatastoreName = localDatastoreName; } + public String getLocalDatastoreName() { + return _localDatastoreName; + } } diff --git a/core/src/com/cloud/agent/api/ModifyStoragePoolCommand.java b/core/src/com/cloud/agent/api/ModifyStoragePoolCommand.java index 136eb0938e3..9ec4a278f2b 100644 ---
a/core/src/com/cloud/agent/api/ModifyStoragePoolCommand.java +++ b/core/src/com/cloud/agent/api/ModifyStoragePoolCommand.java @@ -26,51 +26,49 @@ import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.storage.StoragePool; public class ModifyStoragePoolCommand extends Command { - - boolean add; - StorageFilerTO pool; - String localPath; - String[] options; public static final String LOCAL_PATH_PREFIX = "/mnt/"; - public ModifyStoragePoolCommand() { - - } + private boolean _add; + private StorageFilerTO _pool; + private String _localPath; + private String _storagePath; public ModifyStoragePoolCommand(boolean add, StoragePool pool, String localPath) { - this.add = add; - this.pool = new StorageFilerTO(pool); - this.localPath = localPath; - + _add = add; + _pool = new StorageFilerTO(pool); + _localPath = localPath; } public ModifyStoragePoolCommand(boolean add, StoragePool pool) { this(add, pool, LOCAL_PATH_PREFIX + File.separator + UUID.nameUUIDFromBytes((pool.getHostAddress() + pool.getPath()).getBytes())); } - public StorageFilerTO getPool() { - return pool; + public boolean getAdd() { + return _add; } public void setPool(StoragePool pool) { - this.pool = new StorageFilerTO(pool); + _pool = new StorageFilerTO(pool); } - public boolean getAdd() { - return add; + public StorageFilerTO getPool() { + return _pool; + } + + public String getLocalPath() { + return _localPath; + } + + public void setStoragePath(String storagePath) { + _storagePath = storagePath; + } + + public String getStoragePath() { + return _storagePath; } @Override public boolean executeInSequence() { return false; } - - public String getLocalPath() { - return localPath; - } - - public void setOptions(String[] options) { - this.options = options; - } - } diff --git a/core/src/com/cloud/agent/api/ModifyTargetsAnswer.java b/core/src/com/cloud/agent/api/ModifyTargetsAnswer.java new file mode 100644 index 00000000000..c192e4a0dfa --- /dev/null +++ b/core/src/com/cloud/agent/api/ModifyTargetsAnswer.java @@ -0,0 +1,23 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +public class ModifyTargetsAnswer extends Answer { +} diff --git a/core/src/com/cloud/agent/api/ModifyTargetsCommand.java b/core/src/com/cloud/agent/api/ModifyTargetsCommand.java new file mode 100644 index 00000000000..721516be2c8 --- /dev/null +++ b/core/src/com/cloud/agent/api/ModifyTargetsCommand.java @@ -0,0 +1,57 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +import java.util.List; +import java.util.Map; + +public class ModifyTargetsCommand extends Command { + public static final String IQN = "iqn"; + public static final String STORAGE_HOST = "storageHost"; + public static final String STORAGE_PORT = "storagePort"; + public static final String CHAP_NAME = "chapName"; + public static final String CHAP_SECRET = "chapSecret"; + public static final String MUTUAL_CHAP_NAME = "mutualChapName"; + public static final String MUTUAL_CHAP_SECRET = "mutualChapSecret"; + + private boolean _add; + private List<Map<String, String>> _targets; + + public void setAdd(boolean add) { + _add = add; + } + + public boolean getAdd() { + return _add; + } + + public void setTargets(List<Map<String, String>> targets) { + _targets = targets; + } + + public List<Map<String, String>> getTargets() { + return _targets; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java index d643d7fab78..e476d8f3b35 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.engine.subsystem.api.storage; +import java.util.List; + import com.cloud.storage.DataStoreProviderApiService; import com.cloud.utils.component.Manager; @@ -29,4 +31,6 @@ public interface DataStoreProviderManager extends Manager, DataStoreProviderApiS DataStoreProvider getDefaultImageDataStoreProvider(); DataStoreProvider getDefaultCacheDataStoreProvider(); + + List<DataStoreProvider> getProviders(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java index 82ba8b6105b..d7e8522a40c 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java @@ -21,7 +21,13 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.exception.StorageConflictException; public interface HypervisorHostListener { + boolean hostAdded(long hostId); + boolean hostConnect(long hostId, long poolId) throws StorageConflictException; boolean hostDisconnected(long hostId, long poolId); + + boolean hostAboutToBeRemoved(long hostId); + + boolean hostRemoved(long hostId, long clusterId); }
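The targets list above is a list of maps keyed by the command's string constants. As a rough usage sketch (the SAN address, port, IQN, host ID, and the injected AgentManager reference are all illustrative, not taken from this patch), a caller would assemble and send the command like this:

    // Hedged sketch: build a ModifyTargetsCommand for one hypothetical iSCSI target
    // and send it to a host via the existing AgentManager.easySend() convenience method.
    Map<String, String> target = new HashMap<String, String>();

    target.put(ModifyTargetsCommand.STORAGE_HOST, "10.1.1.10");            // hypothetical SAN address
    target.put(ModifyTargetsCommand.STORAGE_PORT, "3260");                 // default iSCSI port
    target.put(ModifyTargetsCommand.IQN, "iqn.2010-01.com.example:vol-1"); // hypothetical IQN

    List<Map<String, String>> targets = new ArrayList<Map<String, String>>();
    targets.add(target);

    ModifyTargetsCommand cmd = new ModifyTargetsCommand();

    cmd.setAdd(true); // true adds the static targets; false removes them
    cmd.setTargets(targets);

    Answer answer = agentMgr.easySend(hostId, cmd); // easySend() returns null on any error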
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index 0c6bd93982a..e0c0d28e9da 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -26,27 +26,27 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; public interface PrimaryDataStoreDriver extends DataStoreDriver { - public ChapInfo getChapInfo(VolumeInfo volumeInfo); + ChapInfo getChapInfo(VolumeInfo volumeInfo); - public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore); + boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore); - public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore); + void revokeAccess(DataObject dataObject, Host host, DataStore dataStore); // intended for managed storage (cloud.storage_pool.managed = true) // if not managed, return volume.getSize() - public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool storagePool); + long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool storagePool); // intended for managed storage (cloud.storage_pool.managed = true) // if managed storage, return the total number of bytes currently in use for the storage pool in question // if not managed storage, return 0 - public long getUsedBytes(StoragePool storagePool); + long getUsedBytes(StoragePool storagePool); // intended for managed storage (cloud.storage_pool.managed = true) // if managed storage, return the total number of IOPS currently in use for the storage pool in question // if not managed storage, return 0 - public long getUsedIops(StoragePool storagePool); + long getUsedIops(StoragePool storagePool); - public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback); + void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback); - public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback); + void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback); } diff --git a/engine/components-api/src/com/cloud/agent/AgentManager.java b/engine/components-api/src/com/cloud/agent/AgentManager.java index e9e32495d83..244772d67d0 100644 --- a/engine/components-api/src/com/cloud/agent/AgentManager.java +++ b/engine/components-api/src/com/cloud/agent/AgentManager.java @@ -42,7 +42,7 @@ public interface AgentManager { Add, Del, Contains, } - boolean handleDirectConnectAgent(Host host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance) throws ConnectionException; + boolean handleDirectConnectAgent(Host host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance, boolean newHost) throws ConnectionException; /** * easy send method that returns null if there's any errors. It handles all exceptions.
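The new newHost flag lets AgentManagerImpl (further below) fire processHostAdded exactly once, when a host first joins a cluster, instead of on every reconnect or rebalance. A condensed sketch of the intended call-site contract (the first-time check is illustrative; in this patch the flag is supplied by the resource-management code path that creates the host record):

    // Hedged sketch: pass newHost = true only on first registration so listeners
    // receive a single processHostAdded callback per host.
    boolean newHost = (existingHostVO == null); // illustrative first-time check
    _agentMgr.handleDirectConnectAgent(host, startupCommands, resource, false /* forRebalance */, newHost);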
@@ -131,8 +131,6 @@ public interface AgentManager { Answer sendTo(Long dcId, HypervisorType type, Command cmd); -// public AgentAttache handleDirectConnectAgent(HostVO host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance) throws ConnectionException; - public boolean agentStatusTransitTo(HostVO host, Status.Event e, long msId); boolean isAgentAttached(long hostId); @@ -146,4 +144,10 @@ public interface AgentManager { boolean reconnect(long hostId); void rescan(); + + void notifyMonitorsOfNewlyAddedHost(long hostId); + + void notifyMonitorsOfHostAboutToBeRemoved(long hostId); + + void notifyMonitorsOfRemovedHost(long hostId, long clusterId); } diff --git a/engine/components-api/src/com/cloud/agent/Listener.java b/engine/components-api/src/com/cloud/agent/Listener.java index 242f90cfeed..843a634b4c0 100644 --- a/engine/components-api/src/com/cloud/agent/Listener.java +++ b/engine/components-api/src/com/cloud/agent/Listener.java @@ -63,6 +63,12 @@ public interface Listener { */ AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd); + /** + * This method is called by AgentManager when a host is added to a cluster. + * @param hostId the ID of the newly added host + */ + void processHostAdded(long hostId); + /** * This method is called by AgentManager when an agent made a * connection to this server if the listener has @@ -86,6 +92,18 @@ public interface Listener { */ boolean processDisconnect(long agentId, Status state); + /** + * This method is called by AgentManager when a host is about to be removed from a cluster. + * @param hostId the ID of the host that's about to be removed + */ + void processHostAboutToBeRemoved(long hostId); + + /** + * This method is called by AgentManager when a host is removed from a cluster. + * @param hostId the ID of the removed host + */ + void processHostRemoved(long hostId, long clusterId); + /** * If this Listener is passed to the send() method, this method * is called by AgentManager after processing an answer diff --git a/engine/orchestration/src/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/com/cloud/agent/manager/AgentAttache.java index 0acac3c0479..d5ec9009838 100644 --- a/engine/orchestration/src/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/com/cloud/agent/manager/AgentAttache.java @@ -44,6 +44,7 @@ import com.cloud.agent.api.CleanupNetworkRulesCmd; import com.cloud.agent.api.Command; import com.cloud.agent.api.MaintainCommand; import com.cloud.agent.api.MigrateCommand; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.PingTestCommand; import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.ReadyCommand; @@ -109,11 +110,12 @@ public abstract class AgentAttache { protected AgentManagerImpl _agentMgr; - public final static String[] s_commandsAllowedInMaintenanceMode = new String[] {MaintainCommand.class.toString(), MigrateCommand.class.toString(), + public final static String[] s_commandsAllowedInMaintenanceMode = new String[] { MaintainCommand.class.toString(), MigrateCommand.class.toString(), StopCommand.class.toString(), CheckVirtualMachineCommand.class.toString(), PingTestCommand.class.toString(), CheckHealthCommand.class.toString(), ReadyCommand.class.toString(), ShutdownCommand.class.toString(), SetupCommand.class.toString(), - CleanupNetworkRulesCmd.class.toString(), CheckNetworkCommand.class.toString(), PvlanSetupCommand.class.toString(), CheckOnHostCommand.class.toString()}; - protected final static String[] s_commandsNotAllowedInConnectingMode = new String[] {StartCommand.class.toString(), CreateCommand.class.toString()}; + CleanupNetworkRulesCmd.class.toString(), CheckNetworkCommand.class.toString(), PvlanSetupCommand.class.toString(), CheckOnHostCommand.class.toString(), + ModifyTargetsCommand.class.toString() }; + protected final static String[] s_commandsNotAllowedInConnectingMode = new String[] { StartCommand.class.toString(), CreateCommand.class.toString() }; static { Arrays.sort(s_commandsAllowedInMaintenanceMode); Arrays.sort(s_commandsNotAllowedInConnectingMode); diff --git a/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java index 45a7dca4657..efd9eedef61 100644 --- a/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java @@ -538,6 +538,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } + @Override + public void notifyMonitorsOfNewlyAddedHost(long hostId) { + for (final Pair<Integer, Listener> monitor : _hostMonitors) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName()); + } + + monitor.second().processHostAdded(hostId); + } + } + protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, final StartupCommand[] cmd, final boolean forRebalance) throws ConnectionException { final long hostId = attache.getId(); final HostVO host = _hostDao.findById(hostId);
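Since Listener has many implementers, each class below must now provide the three new callbacks; listeners that do not care about cluster membership just add no-op stubs, in the pattern repeated throughout the rest of this patch:

    // Typical no-op stubs for a Listener that ignores host-membership events.
    @Override
    public void processHostAdded(long hostId) {
        // no-op: nothing to do when a host joins a cluster
    }

    @Override
    public void processHostAboutToBeRemoved(long hostId) {
        // no-op: nothing to clean up before a host is removed
    }

    @Override
    public void processHostRemoved(long hostId, long clusterId) {
        // no-op: nothing to clean up after a host is removed
    }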
@@ -1001,6 +1012,28 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return true; } + @Override + public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { + for (final Pair<Integer, Listener> monitor : _hostMonitors) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName()); + } + + monitor.second().processHostAboutToBeRemoved(hostId); + } + } + + @Override + public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { + for (final Pair<Integer, Listener> monitor : _hostMonitors) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName()); + } + + monitor.second().processHostRemoved(hostId, clusterId); + } + } + public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { if (s_logger.isDebugEnabled()) { @@ -1464,7 +1497,8 @@ } @Override - public boolean handleDirectConnectAgent(final Host host, final StartupCommand[] cmds, final ServerResource resource, final boolean forRebalance) throws ConnectionException { + public boolean handleDirectConnectAgent(final Host host, final StartupCommand[] cmds, final ServerResource resource, + final boolean forRebalance, boolean newHost) throws ConnectionException { AgentAttache attache; attache = createAttacheForDirectConnect(host, resource); @@ -1473,6 +1507,11 @@ answers[i] = new StartupAnswer(cmds[i], attache.getId(), PingInterval.value()); } attache.process(answers); + + if (newHost) { + notifyMonitorsOfNewlyAddedHost(host.getId()); + } + attache = notifyMonitorsOfConnection(attache, cmds, forRebalance); return attache != null; @@ -1617,6 +1656,10 @@ return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(final Host host, final StartupCommand cmd, final boolean forRebalance) { if (host.getType().equals(Host.Type.TrafficMonitor) || host.getType().equals(Host.Type.SecondaryStorage)) { @@ -1633,6 +1676,14 @@ return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(final long agentId, final long seq) { return true; diff --git a/engine/orchestration/src/com/cloud/agent/manager/SynchronousListener.java b/engine/orchestration/src/com/cloud/agent/manager/SynchronousListener.java index 28c60c1a07f..96d40777f8e 100644 --- a/engine/orchestration/src/com/cloud/agent/manager/SynchronousListener.java +++ b/engine/orchestration/src/com/cloud/agent/manager/SynchronousListener.java @@ -78,6 +78,18 @@ public class SynchronousListener implements Listener { return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { } diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index 43228e8bfa9..9523b928ed4 100644 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ 
b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -2786,6 +2786,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(final Host agent, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException { if (!(cmd instanceof StartupRoutingCommand)) { diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index e4272720bc8..66185c60978 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -3001,6 +3001,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return null; } + public void processHostAdded(long hostId) { + } + @Override public void processConnect(final Host host, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException { if (!(cmd instanceof StartupRoutingCommand)) { @@ -3088,6 +3091,14 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { return false; diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index d265da15de2..32d1d792244 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -64,6 +64,8 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> { */ List<StoragePoolVO> findPoolByName(String name); + List<StoragePoolVO> findPoolsByProvider(String provider); + /** * Find pools by the pod that matches the details. 
* diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index fd617cce09c..c451e1dee46 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -79,6 +79,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long> AllFieldSearch.and("path", AllFieldSearch.entity().getPath(), SearchCriteria.Op.EQ); AllFieldSearch.and("podId", AllFieldSearch.entity().getPodId(), Op.EQ); AllFieldSearch.and("clusterId", AllFieldSearch.entity().getClusterId(), Op.EQ); + AllFieldSearch.and("storage_provider_name", AllFieldSearch.entity().getStorageProviderName(), Op.EQ); AllFieldSearch.done(); DcPodSearch = createSearchBuilder(); @@ -128,6 +129,13 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long> return listIncludingRemovedBy(sc); } + @Override + public List<StoragePoolVO> findPoolsByProvider(String provider) { + SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create(); + sc.setParameters("storage_provider_name", provider); + return listBy(sc); + } + @Override public StoragePoolVO findPoolByUUID(String uuid) { SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create(); diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java index b34b697af04..0e4755e0345 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java @@ -278,9 +278,16 @@ public class DirectAgentManagerSimpleImpl extends ManagerBase implements AgentMa } @Override - public boolean handleDirectConnectAgent(Host host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance) throws ConnectionException { + public boolean handleDirectConnectAgent(Host host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance, boolean newHost) throws ConnectionException { // TODO Auto-generated method stub return false; } + @Override + public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { + } + + @Override + public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/RemoteHostEndPoint.java b/engine/storage/src/org/apache/cloudstack/storage/RemoteHostEndPoint.java index 1f59cc6cc8a..3bad62eb574 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/RemoteHostEndPoint.java +++ b/engine/storage/src/org/apache/cloudstack/storage/RemoteHostEndPoint.java @@ -160,6 +160,10 @@ public class RemoteHostEndPoint implements EndPoint { return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { // TODO Auto-generated method stub @@ -172,6 +176,14 @@ public class RemoteHostEndPoint implements EndPoint { return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { // TODO Auto-generated method stub
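The new findPoolsByProvider() lookup gives a provider-specific HypervisorHostListener a way to locate just its own pools when one of the new host callbacks fires; a rough sketch (the provider name and the injected DAO field are illustrative):

    // Hedged sketch: react to hostAdded() by touching only this provider's pools.
    List<StoragePoolVO> pools = _storagePoolDao.findPoolsByProvider("SolidFire"); // provider name illustrative

    for (StoragePoolVO pool : pools) {
        // e.g. grant the new host access to each pool on the storage system
    }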
diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java index 6e36514c491..98eeb6b4405 100.644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java @@ -218,6 +218,7 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto this.imageStoreProviderMgr = imageDataStoreProviderMgr; } + @Override public List<DataStoreProvider> getProviders() { return providers; } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index 89af0765fdb..64533d54d2f 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -54,6 +54,11 @@ public class DefaultHostListener implements HypervisorHostListener { @Inject PrimaryDataStoreDao primaryStoreDao; + @Override + public boolean hostAdded(long hostId) { + return true; + } + @Override public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); @@ -109,4 +114,13 @@ public class DefaultHostListener implements HypervisorHostListener { return false; } + @Override + public boolean hostAboutToBeRemoved(long hostId) { + return true; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + return true; + } } diff --git a/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java b/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java index fd8db4a5644..fd54d43241b 100644 --- a/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java +++ b/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java @@ -106,6 +106,10 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public final void processConnect(final Host agent, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException { // Limit the commands we can process @@ -176,6 +180,14 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public final boolean isRecurring() { return false; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java index 4743600d509..3f245273a27 100755 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java @@ -338,6 +338,10 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, return null; } + @Override 
+ public void processHostAdded(long hostId) { + } + /* for reconnecting */ @Override public void processConnect(Host host, StartupCommand cmd, @@ -351,6 +355,14 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { return false; diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java index 76eb1c08a7e..c942c8ff55c 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java @@ -245,6 +245,10 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { @@ -273,6 +277,14 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { return false; diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java index 6e75244c323..e09a5a950a4 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java @@ -140,6 +140,10 @@ public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer imp return false; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { @@ -155,6 +159,14 @@ public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer imp return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(long agentId, long seq) { return false; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index f27e938e733..63124e1ca9e 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -841,6 +841,10 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) { if (cmd instanceof StartupCommand) { @@ -882,6 +886,14 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + 
@Override public boolean isRecurring() { return false; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 14a73bb3a0c..82e7f65b6ad 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -148,6 +148,8 @@ import com.cloud.agent.api.MigrateWithStorageCommand; import com.cloud.agent.api.ModifySshKeysCommand; import com.cloud.agent.api.ModifyStoragePoolAnswer; import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.ModifyTargetsAnswer; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.NetworkUsageAnswer; import com.cloud.agent.api.NetworkUsageCommand; import com.cloud.agent.api.PingCommand; @@ -409,6 +411,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa answer = execute((DestroyCommand)cmd); } else if (clz == CreateStoragePoolCommand.class) { return execute((CreateStoragePoolCommand)cmd); + } else if (clz == ModifyTargetsCommand.class) { + answer = execute((ModifyTargetsCommand)cmd); } else if (clz == ModifyStoragePoolCommand.class) { answer = execute((ModifyStoragePoolCommand)cmd); } else if (clz == DeleteStoragePoolCommand.class) { @@ -933,7 +937,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa */ // Fallback to E1000 if no specific nicAdapter is passed VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.E1000; - Map details = cmd.getDetails(); + Map details = cmd.getDetails(); if (details != null) { nicDeviceType = VirtualEthernetCardType.valueOf((String) details.get("nicAdapter")); } @@ -3527,7 +3531,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private Answer execute(MigrateVolumeCommand cmd) { String volumePath = cmd.getVolumePath(); StorageFilerTO poolTo = cmd.getPool(); - Volume.Type volumeType = cmd.getVolumeType(); if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource MigrateVolumeCommand: " + _gson.toJson(cmd)); @@ -3608,7 +3611,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // Consolidate VM disks. // In case of a linked clone VM, if VM's disks are not consolidated, // further volume operations on the ROOT volume such as volume snapshot etc. will result in DB inconsistencies. 
- String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext()); if (!vmMo.consolidateVmDisks()) { s_logger.warn("VM disk consolidation failed after storage migration."); } else { @@ -3677,6 +3679,14 @@ return new Answer(cmd, true, "success"); } + protected Answer execute(ModifyTargetsCommand cmd) { + VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); + + handleTargets(cmd.getAdd(), cmd.getTargets(), (HostMO)hyperHost); + + return new ModifyTargetsAnswer(); + } + protected Answer execute(ModifyStoragePoolCommand cmd) { if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource ModifyStoragePoolCommand: " + _gson.toJson(cmd)); @@ -3690,34 +3700,53 @@ throw new Exception("Unsupported storage pool type " + pool.getType()); } - ManagedObjectReference morDatastore = null; - morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid()); - if (morDatastore == null) - morDatastore = - hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", "")); + ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid()); + + if (morDatastore == null) { + morDatastore = hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", "")); + } assert (morDatastore != null); + DatastoreSummary summary = new DatastoreMO(getServiceContext(), morDatastore).getSummary(); + long capacity = summary.getCapacity(); long available = summary.getFreeSpace(); + Map<String, TemplateProp> tInfo = new HashMap<String, TemplateProp>(); ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, capacity, available, tInfo); + if (cmd.getAdd() && pool.getType() == StoragePoolType.VMFS) { answer.setLocalDatastoreName(morDatastore.getValue()); } + return answer; } catch (Throwable e) { if (e instanceof RemoteException) { s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + invalidateServiceContext(); } String msg = "ModifyStoragePoolCommand failed due to " + VmwareHelper.getExceptionMessage(e); + s_logger.error(msg, e); + return new Answer(cmd, false, msg); } } + private void handleTargets(boolean add, List<Map<String, String>> targets, HostMO host) { + if (targets != null && targets.size() > 0) { + try { + _storageProcessor.handleTargetsForHost(add, targets, host); + } + catch (Exception ex) { + s_logger.warn(ex.getMessage()); + } + } + } + protected Answer execute(DeleteStoragePoolCommand cmd) { if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource DeleteStoragePoolCommand: " + _gson.toJson(cmd)); @@ -4701,12 +4730,6 @@ } } - private boolean isVmInCluster(String vmName) throws Exception { - VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); - - return hyperHost.findVmOnPeerHyperHost(vmName) != null; - } - protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout) throws Exception {
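handleTargets() above delegates to VmwareStorageProcessor.handleTargetsForHost(), which (next) converts each target map into a HostInternetScsiHbaStaticTarget, applying one-way and mutual CHAP only when both the name and secret are present, and then fans the add/remove plus HBA rescan out to one worker per host. The fan-out pattern, condensed (the run() body is summarized in a comment):

    // Condensed sketch of the per-host fan-out used below: each worker adds or
    // removes the static targets on its host's iSCSI HBA and then rescans it.
    ExecutorService executorService = Executors.newFixedThreadPool(hosts.size());

    for (final HostMO host : hosts) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                // addInternetScsiStaticTargets(...) or removeInternetScsiStaticTargets(...),
                // then rescanHba(...) on this host's storage system
            }
        });
    }

    executorService.shutdown();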
diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index 310313ab378..21f79f983e2 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -71,6 +71,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; @@ -1911,14 +1912,77 @@ public class VmwareStorageProcessor implements StorageProcessor { return (int)(bytes / (1024L * 1024L)); } - private void addRemoveInternetScsiTargetsToAllHosts(VmwareContext context, final boolean add, final List<HostInternetScsiHbaStaticTarget> lstTargets, - List<Pair<ManagedObjectReference, String>> lstHosts) throws Exception { - ExecutorService executorService = Executors.newFixedThreadPool(lstHosts.size()); + public void handleTargetsForHost(boolean add, List<Map<String, String>> targets, HostMO host) throws Exception { + List<HostInternetScsiHbaStaticTarget> lstTargets = new ArrayList<HostInternetScsiHbaStaticTarget>(); + + for (Map<String, String> mapTarget : targets) { + HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget(); + + String targetAddress = mapTarget.get(ModifyTargetsCommand.STORAGE_HOST); + Integer targetPort = Integer.parseInt(mapTarget.get(ModifyTargetsCommand.STORAGE_PORT)); + String iScsiName = trimIqn(mapTarget.get(ModifyTargetsCommand.IQN)); + + target.setAddress(targetAddress); + target.setPort(targetPort); + target.setIScsiName(iScsiName); + + String chapName = mapTarget.get(ModifyTargetsCommand.CHAP_NAME); + String chapSecret = mapTarget.get(ModifyTargetsCommand.CHAP_SECRET); + + if (StringUtils.isNotBlank(chapName) && StringUtils.isNotBlank(chapSecret)) { + HostInternetScsiHbaAuthenticationProperties auth = new HostInternetScsiHbaAuthenticationProperties(); + + String strAuthType = "chapRequired"; + + auth.setChapAuthEnabled(true); + auth.setChapInherited(false); + auth.setChapAuthenticationType(strAuthType); + auth.setChapName(chapName); + auth.setChapSecret(chapSecret); + + String mutualChapName = mapTarget.get(ModifyTargetsCommand.MUTUAL_CHAP_NAME); + String mutualChapSecret = mapTarget.get(ModifyTargetsCommand.MUTUAL_CHAP_SECRET); + + if (StringUtils.isNotBlank(mutualChapName) && StringUtils.isNotBlank(mutualChapSecret)) { + auth.setMutualChapInherited(false); + auth.setMutualChapAuthenticationType(strAuthType); + auth.setMutualChapName(mutualChapName); + auth.setMutualChapSecret(mutualChapSecret); + } + + target.setAuthenticationProperties(auth); + } + + lstTargets.add(target); + } + + List<HostMO> hosts = new ArrayList<>(); + + hosts.add(host); + + addRemoveInternetScsiTargetsToAllHosts(add, lstTargets, hosts); + } + + private void addRemoveInternetScsiTargetsToAllHosts(VmwareContext context, final boolean add, final List<HostInternetScsiHbaStaticTarget> targets, + List<Pair<ManagedObjectReference, String>> hostPairs) throws Exception { + List<HostMO> hosts = new ArrayList<>(); + + for (Pair<ManagedObjectReference, String> hostPair : hostPairs) { + HostMO host = new HostMO(context, hostPair.first()); + + hosts.add(host); + } + + addRemoveInternetScsiTargetsToAllHosts(add, targets, hosts); + } + + private void addRemoveInternetScsiTargetsToAllHosts(final boolean add, final List<HostInternetScsiHbaStaticTarget> targets, + List<HostMO> hosts) throws Exception { + ExecutorService executorService = Executors.newFixedThreadPool(hosts.size()); final List<Exception> exceptions = new ArrayList<Exception>(); - for (Pair<ManagedObjectReference, String> hostPair : lstHosts) { - HostMO host = new HostMO(context, hostPair.first()); + for (HostMO host : hosts) { HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO(); boolean iScsiHbaConfigured = false; @@ -1938,9 
+2002,9 @@ public class VmwareStorageProcessor implements StorageProcessor { public void run() { try { if (add) { - hss.addInternetScsiStaticTargets(iScsiHbaDevice, lstTargets); + hss.addInternetScsiStaticTargets(iScsiHbaDevice, targets); } else { - hss.removeInternetScsiStaticTargets(iScsiHbaDevice, lstTargets); + hss.removeInternetScsiStaticTargets(iScsiHbaDevice, targets); } hss.rescanHba(iScsiHbaDevice); diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java index 63c44853d1a..4416c20dd6a 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java @@ -113,16 +113,15 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L protected String _guestNic; protected boolean _setupMultipath; protected String _instance; - private String xs620snapshothotfix = "Xenserver-Vdi-Copy-HotFix"; @Inject protected AlertManager _alertMgr; @Inject protected AgentManager _agentMgr; @Inject - VMTemplateDao _tmpltDao; + private VMTemplateDao _tmpltDao; @Inject - HostPodDao _podDao; + private HostPodDao _podDao; protected XcpServerDiscoverer() { } @@ -542,6 +541,10 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L } } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(com.cloud.host.Host agent, StartupCommand cmd, boolean forRebalance) throws ConnectionException { if (!(cmd instanceof StartupRoutingCommand)) { @@ -629,6 +632,14 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(long agentId, long seq) { return false; diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java index 67d456ff3c2..4c7136e28f5 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java @@ -49,7 +49,8 @@ public final class CitrixModifyStoragePoolCommandWrapper extends CommandWrapper< final boolean add = command.getAdd(); if (add) { try { - final SR sr = citrixResourceBase.getStorageRepository(conn, pool.getUuid()); + final String srName = command.getStoragePath() != null ? 
command.getStoragePath() : pool.getUuid(); + final SR sr = citrixResourceBase.getStorageRepository(conn, srName); citrixResourceBase.setupHeartbeatSr(conn, sr, false); final long capacity = sr.getPhysicalSize(conn); final long available = capacity - sr.getPhysicalUtilisation(conn); @@ -81,7 +82,7 @@ public final class CitrixModifyStoragePoolCommandWrapper extends CommandWrapper< if (result == null || !result.split("#")[1].equals("0")) { throw new CloudRuntimeException("Unable to remove heartbeat file entry for SR " + srUuid + " due to " + result); } - return new Answer(command, true, "seccuss"); + return new Answer(command, true, "success"); } catch (final XenAPIException e) { final String msg = "ModifyStoragePoolCommand remove XenAPIException:" + e.toString() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath(); diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java index 16fd11de3c3..f38a9c702c6 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java @@ -735,6 +735,11 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, return null; } + @Override + public void processHostAdded(long hostId) { + + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { @@ -745,6 +750,16 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + + } + @Override public boolean isRecurring() { return false; diff --git a/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java b/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java index c2624eaa255..b40e60ecf72 100644 --- a/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java +++ b/plugins/storage/volume/cloudbyte/src/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java @@ -76,6 +76,11 @@ public class ElastistorHostListener implements HypervisorHostListener { @Inject HostDao _hostDao; + @Override + public boolean hostAdded(long hostId) { + return true; + } + @Override public boolean hostConnect(long hostId, long poolId) { StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); @@ -126,4 +131,13 @@ public class ElastistorHostListener implements HypervisorHostListener { return false; } + @Override + public boolean hostAboutToBeRemoved(long hostId) { + return true; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + return true; + } } diff --git a/plugins/storage/volume/nexenta/src/org/apache/cloudstack/storage/datastore/provider/NexentaHostListener.java b/plugins/storage/volume/nexenta/src/org/apache/cloudstack/storage/datastore/provider/NexentaHostListener.java index 950325288f3..5fe759902b9 100644 --- a/plugins/storage/volume/nexenta/src/org/apache/cloudstack/storage/datastore/provider/NexentaHostListener.java +++ 
b/plugins/storage/volume/nexenta/src/org/apache/cloudstack/storage/datastore/provider/NexentaHostListener.java @@ -23,13 +23,40 @@ import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; public class NexentaHostListener implements HypervisorHostListener { - private static final Logger logger = Logger.getLogger(NexentaHostListener.class); + private static final Logger s_logger = Logger.getLogger(NexentaHostListener.class); + + @Override + public boolean hostAdded(long hostId) { + s_logger.trace("hostAdded(long) invoked"); - public boolean hostConnect(long hostId, long poolId) { return true; } + @Override + public boolean hostConnect(long hostId, long poolId) { + s_logger.trace("hostConnect(long, long) invoked"); + + return true; + } + + @Override public boolean hostDisconnected(long hostId, long poolId) { + s_logger.trace("hostDisconnected(long, long) invoked"); + + return true; + } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + s_logger.trace("hostAboutToBeRemoved(long) invoked"); + + return true; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + s_logger.trace("hostRemoved(long, long) invoked"); + return true; } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index 61e199c74cf..5f647db1109 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -76,7 +76,6 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreDriver.class); - private static final int s_lockTimeInSeconds = 300; private static final int s_lowestHypervisorSnapshotReserve = 10; @Inject private AccountDao _accountDao; @@ -141,8 +140,12 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); - if (!lock.lock(s_lockTimeInSeconds)) { - s_logger.debug("Couldn't lock the DB (in grantAccess) on the following string: " + cluster.getUuid()); + if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) { + String errMsg = "Couldn't lock the DB (in grantAccess) on the following string: " + cluster.getUuid(); + + s_logger.debug(errMsg); + + throw new CloudRuntimeException(errMsg); } try { @@ -161,10 +164,9 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { if (vagId != null) { SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId)); - String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hosts)); long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, true); - SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), hostIqns, volumeIds); + SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds); } else { SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolumeId, storagePoolId, cluster.getUuid(), hosts, _clusterDetailsDao); @@ -196,8 +198,12 @@ public class 
SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); - if (!lock.lock(s_lockTimeInSeconds)) { - s_logger.debug("Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid()); + if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) { + String errMsg = "Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid(); + + s_logger.debug(errMsg); + + throw new CloudRuntimeException(errMsg); } try { @@ -206,16 +212,13 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { String vagId = clusterDetail != null ? clusterDetail.getValue() : null; if (vagId != null) { - List<HostVO> hosts = _hostDao.findByClusterId(clusterId); - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId)); - String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hosts)); long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, false); - SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), hostIqns, volumeIds); + SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds); } } finally { @@ -701,7 +704,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } @Override - public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) { + public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) { throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead."); } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index 4b38f221df0..f89c97a8999 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -136,7 +136,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC lClusterDefaultMinIops = Long.parseLong(clusterDefaultMinIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting of " + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + + s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + ", using default value: " + lClusterDefaultMinIops + ". Exception: " + ex); } @@ -148,7 +148,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting of " + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + + s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + ", using default value: " + lClusterDefaultMaxIops + ". 
Exception: " + ex); } @@ -160,7 +160,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC fClusterDefaultBurstIopsPercentOfMaxIops = Float.parseFloat(clusterDefaultBurstIopsPercentOfMaxIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting of " + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS + + s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS + ", using default value: " + fClusterDefaultBurstIopsPercentOfMaxIops + ". Exception: " + ex); } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java index 7cb690014bb..6921e4f4872 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java @@ -70,6 +70,7 @@ import com.cloud.user.Account; import com.cloud.user.AccountDetailsDao; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { @@ -178,8 +179,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor lMinIops = Long.parseLong(minIops); } } catch (Exception ex) { - s_logger.info("[ignored]" - + "error getting minimals iops: " + ex.getLocalizedMessage()); + s_logger.info("[ignored] error getting Min IOPS: " + ex.getLocalizedMessage()); } try { @@ -189,8 +189,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor lMaxIops = Long.parseLong(maxIops); } } catch (Exception ex) { - s_logger.info("[ignored]" - + "error getting maximal iops: " + ex.getLocalizedMessage()); + s_logger.info("[ignored] error getting Max IOPS: " + ex.getLocalizedMessage()); } try { @@ -200,8 +199,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor lBurstIops = Long.parseLong(burstIops); } } catch (Exception ex) { - s_logger.info("[ignored]" - + "error getting iops bursts: " + ex.getLocalizedMessage()); + s_logger.info("[ignored] error getting Burst IOPS: " + ex.getLocalizedMessage()); } if (lMinIops > lMaxIops) { @@ -255,14 +253,27 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor parameters.setPath(iqn); } - // this adds a row in the cloud.storage_pool table for this SolidFire volume - DataStore dataStore = _primaryDataStoreHelper.createPrimaryDataStore(parameters); + ClusterVO cluster = _clusterDao.findById(clusterId); + + GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); + + if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) { + String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid(); + + s_logger.debug(errMsg); + + throw new CloudRuntimeException(errMsg); + } + + DataStore dataStore = null; - // now that we have a DataStore (we need the id from the DataStore instance), we can create a Volume Access Group, if need be, and - // place the newly created volume in the Volume Access Group try { + // this adds a row in the cloud.storage_pool table for this SolidFire volume + dataStore 
= _primaryDataStoreHelper.createPrimaryDataStore(parameters); + + // now that we have a DataStore (we need the id from the DataStore instance), we can create a Volume Access Group, if need be, and + // place the newly created volume in the Volume Access Group List<HostVO> hosts = _hostDao.findByClusterId(clusterId); - ClusterVO cluster = _clusterDao.findById(clusterId); SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolume.getId(), dataStore.getId(), cluster.getUuid(), hosts, _clusterDetailsDao); @@ -275,6 +286,10 @@ public class SolidFireSharedPrimaryDataStor throw new CloudRuntimeException(ex.getMessage()); } + finally { + lock.unlock(); + lock.releaseRef(); + } return dataStore; } @@ -546,7 +561,25 @@ public class SolidFireSharedPrimaryDataStor } if (clusterId != null) { - removeVolumeFromVag(storagePool.getId(), clusterId); + ClusterVO cluster = _clusterDao.findById(clusterId); + + GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); + + if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) { + String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid(); + + s_logger.debug(errMsg); + + throw new CloudRuntimeException(errMsg); + } + + try { + removeVolumeFromVag(storagePool.getId(), clusterId); + } + finally { + lock.unlock(); + lock.releaseRef(); + } } deleteSolidFireVolume(storagePool.getId()); @@ -561,16 +594,13 @@ public class SolidFireSharedPrimaryDataStor String vagId = clusterDetail != null ? clusterDetail.getValue() : null; if (vagId != null) { - List<HostVO> hosts = _hostDao.findByClusterId(clusterId); - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId)); - String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hosts)); long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, false); - SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), hostIqns, volumeIds); + SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds); } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java index d8e4ec61ec8..082e1a3d2ba 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java @@ -18,40 +18,69 @@ */ package org.apache.cloudstack.storage.datastore.provider; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import javax.inject.Inject; import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; import com.cloud.agent.AgentManager; import 
com.cloud.agent.api.Answer; import com.cloud.agent.api.ModifyStoragePoolAnswer; import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.alert.AlertManager; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.dao.ClusterDao; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.DataStoreRole; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; public class SolidFireHostListener implements HypervisorHostListener { private static final Logger s_logger = Logger.getLogger(SolidFireHostListener.class); - @Inject - private AgentManager _agentMgr; - @Inject - private AlertManager _alertMgr; - @Inject - private DataStoreManager _dataStoreMgr; - @Inject - private HostDao _hostDao; - @Inject - private StoragePoolHostDao storagePoolHostDao; + @Inject private AgentManager _agentMgr; + @Inject private AlertManager _alertMgr; + @Inject private ClusterDao _clusterDao; + @Inject private ClusterDetailsDao _clusterDetailsDao; + @Inject private DataStoreManager _dataStoreMgr; + @Inject private HostDao _hostDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; + @Inject private StoragePoolHostDao storagePoolHostDao; + @Inject private VMInstanceDao _vmDao; + @Inject private VolumeDao _volumeDao; + + @Override + public boolean hostAdded(long hostId) { + HostVO host = _hostDao.findById(hostId); + + SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.PROVIDER_NAME, + _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao); + + handleVMware(host, true); + + return true; + } @Override public boolean hostConnect(long hostId, long storagePoolId) { @@ -65,33 +94,13 @@ public class SolidFireHostListener implements HypervisorHostListener { storagePoolHostDao.persist(storagePoolHost); } - // just want to send the ModifyStoragePoolCommand for KVM - if (host.getHypervisorType() != HypervisorType.KVM) { - return true; + if (host.getHypervisorType().equals(HypervisorType.XenServer)) { + handleXenServer(host.getClusterId(), host.getId(), storagePoolId); } - - StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); - - Answer answer = _agentMgr.easySend(hostId, cmd); - - if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + else if (host.getHypervisorType().equals(HypervisorType.KVM)) { + handleKVM(hostId, storagePoolId); } - if (!answer.getResult()) { - String msg = "Unable to attach storage pool " + storagePoolId + " to host " + hostId; - - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); - - throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() + - " (" + storagePool.getId() + ")"); - } - - assert (answer instanceof ModifyStoragePoolAnswer) : 
"ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; - - s_logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); - return true; } @@ -105,4 +114,171 @@ public class SolidFireHostListener implements HypervisorHostListener { return true; } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + HostVO host = _hostDao.findById(hostId); + + handleVMware(host, false); + + return true; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.PROVIDER_NAME, + _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao); + + return true; + } + + private void handleXenServer(long clusterId, long hostId, long storagePoolId) { + List<String> storagePaths = getStoragePaths(clusterId, storagePoolId); + + StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + + for (String storagePath : storagePaths) { + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); + + cmd.setStoragePath(storagePath); + + sendModifyStoragePoolCommand(cmd, storagePool, hostId); + } + } + + private void handleVMware(HostVO host, boolean add) { + if (HypervisorType.VMware.equals(host.getHypervisorType())) { + List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(SolidFireUtil.PROVIDER_NAME); + + if (storagePools != null && storagePools.size() > 0) { + List<Map<String, String>> targets = new ArrayList<>(); + + for (StoragePoolVO storagePool : storagePools) { + List<Map<String, String>> targetsForClusterAndStoragePool = getTargets(host.getClusterId(), storagePool.getId()); + + targets.addAll(targetsForClusterAndStoragePool); + } + + ModifyTargetsCommand cmd = new ModifyTargetsCommand(); + + cmd.setAdd(add); + cmd.setTargets(targets); + + sendModifyTargetsCommand(cmd, host.getId()); + } + } + } + + private void handleKVM(long hostId, long storagePoolId) { + StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); + + sendModifyStoragePoolCommand(cmd, storagePool, hostId); + } + + private List<String> getStoragePaths(long clusterId, long storagePoolId) { + List<String> storagePaths = new ArrayList<>(); + + // If you do not pass in null for the second parameter, you only get back applicable ROOT disks. + List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null); + + if (volumes != null) { + for (VolumeVO volume : volumes) { + Long instanceId = volume.getInstanceId(); + + if (instanceId != null) { + VMInstanceVO vmInstance = _vmDao.findById(instanceId); + + Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId(); + + if (hostIdForVm != null) { + HostVO hostForVm = _hostDao.findById(hostIdForVm); + + if (hostForVm.getClusterId().equals(clusterId)) { + storagePaths.add(volume.get_iScsiName()); + } + } + } + } + } + + return storagePaths; + } + + private List<Map<String, String>> getTargets(long clusterId, long storagePoolId) { + List<Map<String, String>> targets = new ArrayList<>(); + + StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); + + // If you do not pass in null for the second parameter, you only get back applicable ROOT disks. 
+ List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null); + + if (volumes != null) { + for (VolumeVO volume : volumes) { + Long instanceId = volume.getInstanceId(); + + if (instanceId != null) { + VMInstanceVO vmInstance = _vmDao.findById(instanceId); + + Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId(); + + if (hostIdForVm != null) { + HostVO hostForVm = _hostDao.findById(hostIdForVm); + + if (hostForVm.getClusterId().equals(clusterId)) { + Map<String, String> details = new HashMap<>(); + + details.put(ModifyTargetsCommand.IQN, volume.get_iScsiName()); + details.put(ModifyTargetsCommand.STORAGE_HOST, storagePool.getHostAddress()); + details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePool.getPort())); + + targets.add(details); + } + } + } + } + } + + return targets; + } + + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); + } + + if (!answer.getResult()) { + String msg = "Unable to modify targets on the following host: " + hostId; + + HostVO host = _hostDao.findById(hostId); + + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); + + throw new CloudRuntimeException(msg); + } + } + + private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + } + + if (!answer.getResult()) { + String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId; + + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); + + throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() + + " (" + storagePool.getId() + ")"); + } + + assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected; Pool = " + storagePool.getId() + " Host = " + hostId; + + s_logger.info("Connection established between storage pool " + storagePool + " and host " + hostId); + } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java index 9881d1d179b..e505cd07ba5 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java @@ -18,18 +18,33 @@ */ package org.apache.cloudstack.storage.datastore.provider; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import 
org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ModifyStoragePoolAnswer; import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.alert.AlertManager; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.DataStoreRole; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; @@ -37,56 +52,168 @@ import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.exception.CloudRuntimeException; public class SolidFireSharedHostListener implements HypervisorHostListener { - private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class); + private static final Logger s_logger = Logger.getLogger(SolidFireSharedHostListener.class); - @Inject private AgentManager agentMgr; - @Inject private DataStoreManager dataStoreMgr; - @Inject private AlertManager alertMgr; - @Inject private StoragePoolHostDao storagePoolHostDao; - @Inject private PrimaryDataStoreDao primaryStoreDao; + @Inject private AgentManager _agentMgr; + @Inject private AlertManager _alertMgr; + @Inject private ClusterDao _clusterDao; + @Inject private ClusterDetailsDao _clusterDetailsDao; + @Inject private DataStoreManager _dataStoreMgr; + @Inject private HostDao _hostDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject private StoragePoolHostDao _storagePoolHostDao; + @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; + + @Override + public boolean hostAdded(long hostId) { + HostVO host = _hostDao.findById(hostId); + + SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.SHARED_PROVIDER_NAME, + _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao); + + handleVMware(hostId, true); + + return true; + } @Override public boolean hostConnect(long hostId, long storagePoolId) { - StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); - - if (storagePoolHost == null) { - storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, ""); - - storagePoolHostDao.persist(storagePoolHost); - } - - StoragePool storagePool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); - Answer answer = agentMgr.easySend(hostId, cmd); - if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command for storage pool: " + storagePool.getId()); + ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId); + + StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(storagePoolId, hostId); + + if (storagePoolHost != null) { + storagePoolHost.setLocalPath(answer.getPoolInfo().getLocalPath().replaceAll("//", "/")); + } else { + storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, answer.getPoolInfo().getLocalPath().replaceAll("//", "/")); + + 
_storagePoolHostDao.persist(storagePoolHost); } - if (!answer.getResult()) { - String msg = "Unable to attach storage pool " + storagePoolId + " to the host " + hostId; + StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); - alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); + storagePoolVO.setCapacityBytes(answer.getPoolInfo().getCapacityBytes()); + storagePoolVO.setUsedBytes(answer.getPoolInfo().getCapacityBytes() - answer.getPoolInfo().getAvailableBytes()); - throw new CloudRuntimeException(msg); - } - - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " + - storagePool.getId() + "; Host=" + hostId; - - s_logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); + _storagePoolDao.update(storagePoolId, storagePoolVO); return true; } @Override public boolean hostDisconnected(long hostId, long storagePoolId) { - StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); + StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(storagePoolId, hostId); if (storagePoolHost != null) { - storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId); + _storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId); } return true; } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + handleVMware(hostId, false); + + return true; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.SHARED_PROVIDER_NAME, + _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao); + + return true; + } + + private void handleVMware(long hostId, boolean add) { + HostVO host = _hostDao.findById(hostId); + + if (HypervisorType.VMware.equals(host.getHypervisorType())) { + List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(SolidFireUtil.SHARED_PROVIDER_NAME); + + if (storagePools != null && storagePools.size() > 0) { + List<Map<String, String>> targets = new ArrayList<>(); + + for (StoragePoolVO storagePool : storagePools) { + if (storagePool.getClusterId().equals(host.getClusterId())) { + long storagePoolId = storagePool.getId(); + + StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN); + + String iqn = storagePoolDetail.getValue(); + + storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP); + + String sVip = storagePoolDetail.getValue(); + + storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT); + + String sPort = storagePoolDetail.getValue(); + + Map<String, String> details = new HashMap<>(); + + details.put(ModifyTargetsCommand.IQN, iqn); + details.put(ModifyTargetsCommand.STORAGE_HOST, sVip); + details.put(ModifyTargetsCommand.STORAGE_PORT, sPort); + + targets.add(details); + } + } + + if (targets.size() > 0) { + ModifyTargetsCommand cmd = new ModifyTargetsCommand(); + + cmd.setAdd(add); + cmd.setTargets(targets); + + sendModifyTargetsCommand(cmd, hostId); + } + } + } + } + + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); + } + + if (!answer.getResult()) { + String msg = 
"Unable to modify targets on the following host: " + hostId; + + HostVO host = _hostDao.findById(hostId); + + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); + + throw new CloudRuntimeException(msg); + } + } + + private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command for storage pool: " + storagePool.getId()); + } + + if (!answer.getResult()) { + String msg = "Unable to attach storage pool " + storagePool.getId() + " to the host " + hostId; + + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); + + throw new CloudRuntimeException(msg); + } + + assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " + + storagePool.getId() + "; Host = " + hostId; + + s_logger.info("Connection established between storage pool " + storagePool + " and host " + hostId); + + return (ModifyStoragePoolAnswer)answer; + } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java index 9c486db429c..7268e72914a 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java @@ -28,6 +28,7 @@ import java.security.SecureRandom; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -58,14 +59,19 @@ import com.google.gson.GsonBuilder; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.security.SSLUtils; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; import com.cloud.host.Host; import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.user.AccountDetailVO; import com.cloud.user.AccountDetailsDao; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; public class SolidFireUtil { @@ -73,6 +79,8 @@ public class SolidFireUtil { public static final String PROVIDER_NAME = "SolidFire"; public static final String SHARED_PROVIDER_NAME = "SolidFireShared"; + public static final int s_lockTimeInSeconds = 300; + public static final String LOG_PREFIX = "SolidFire: "; public static final String MANAGEMENT_VIP = "mVip"; @@ -124,6 +132,22 @@ public class SolidFireUtil { private final String _clusterAdminPassword; public SolidFireConnection(String managementVip, int managementPort, String clusterAdminUsername, String clusterAdminPassword) { + if (managementVip == null) { + throw new CloudRuntimeException("The management VIP cannot be 'null'."); + } + + if (managementPort <= 0) { + 
throw new CloudRuntimeException("The management port must be a positive integer."); + } + + if (clusterAdminUsername == null) { + throw new CloudRuntimeException("The cluster admin username cannot be 'null'."); + } + + if (clusterAdminPassword == null) { + throw new CloudRuntimeException("The cluster admin password cannot be 'null'."); + } + _managementVip = managementVip; _managementPort = managementPort; _clusterAdminUsername = clusterAdminUsername; @@ -145,6 +169,22 @@ public class SolidFireUtil { public String getClusterAdminPassword() { return _clusterAdminPassword; } + + @Override + public int hashCode() { + return _managementVip.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof SolidFireConnection)) { + return false; + } + + SolidFireConnection sfConnection = (SolidFireConnection)obj; + + return _managementVip.equals(sfConnection.getManagementVip()); + } } public static SolidFireConnection getSolidFireConnection(long storagePoolId, StoragePoolDetailsDao storagePoolDetailsDao) { @@ -238,6 +278,58 @@ public class SolidFireUtil { } } + public static void hostAddedToOrRemovedFromCluster(long hostId, long clusterId, boolean added, String storageProvider, + ClusterDao clusterDao, ClusterDetailsDao clusterDetailsDao, PrimaryDataStoreDao storagePoolDao, StoragePoolDetailsDao storagePoolDetailsDao, HostDao hostDao) { + ClusterVO cluster = clusterDao.findById(clusterId); + + GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); + + if (!lock.lock(s_lockTimeInSeconds)) { + String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid(); + + s_logger.debug(errMsg); + + throw new CloudRuntimeException(errMsg); + } + + try { + List<StoragePoolVO> storagePools = storagePoolDao.findPoolsByProvider(storageProvider); + + if (storagePools != null && storagePools.size() > 0) { + List<SolidFireConnection> sfConnections = new ArrayList<>(); + + for (StoragePoolVO storagePool : storagePools) { + ClusterDetailsVO clusterDetail = clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePool.getId())); + + String vagId = clusterDetail != null ? 
clusterDetail.getValue() : null; + + if (vagId != null) { + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePool.getId(), storagePoolDetailsDao); + + if (!sfConnections.contains(sfConnection)) { + sfConnections.add(sfConnection); + + SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId)); + + List<HostVO> hostsToAddOrRemove = new ArrayList<>(); + HostVO hostToAddOrRemove = hostDao.findByIdIncludingRemoved(hostId); + + hostsToAddOrRemove.add(hostToAddOrRemove); + + String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hostsToAddOrRemove), added); + + SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), hostIqns, sfVag.getVolumeIds()); + } + } + } + } + } + finally { + lock.unlock(); + lock.releaseRef(); + } + } + public static long placeVolumeInVolumeAccessGroup(SolidFireConnection sfConnection, long sfVolumeId, long storagePoolId, String vagUuid, List<HostVO> hosts, ClusterDetailsDao clusterDetailsDao) { if (hosts == null || hosts.isEmpty()) { @@ -264,8 +356,7 @@ public class SolidFireUtil { long[] volumeIds = getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, true); - SolidFireUtil.modifySolidFireVag(sfConnection, lVagId, - sfVag.getInitiators(), volumeIds); + SolidFireUtil.modifySolidFireVag(sfConnection, lVagId, sfVag.getInitiators(), volumeIds); } ClusterDetailsVO clusterDetail = new ClusterDetailsVO(hosts.get(0).getClusterId(), getVagKey(storagePoolId), String.valueOf(lVagId)); @@ -289,20 +380,34 @@ public class SolidFireUtil { return true; } - public static String[] getNewHostIqns(String[] currentIqns, String[] newIqns) { - List<String> lstIqns = new ArrayList<String>(); + public static String[] getNewHostIqns(String[] iqns, String[] iqnsToAddOrRemove, boolean add) { + if (add) { + return getNewHostIqnsAdd(iqns, iqnsToAddOrRemove); + } - if (currentIqns != null) { - for (String currentIqn : currentIqns) { - lstIqns.add(currentIqn); + return getNewHostIqnsRemove(iqns, iqnsToAddOrRemove); + } + + private static String[] getNewHostIqnsAdd(String[] iqns, String[] iqnsToAdd) { + List<String> lstIqns = iqns != null ? new ArrayList<>(Arrays.asList(iqns)) : new ArrayList<String>(); + + if (iqnsToAdd != null) { + for (String iqnToAdd : iqnsToAdd) { + if (!lstIqns.contains(iqnToAdd)) { + lstIqns.add(iqnToAdd); + } } } - if (newIqns != null) { - for (String newIqn : newIqns) { - if (!lstIqns.contains(newIqn)) { - lstIqns.add(newIqn); - } + return lstIqns.toArray(new String[0]); + } + + private static String[] getNewHostIqnsRemove(String[] iqns, String[] iqnsToRemove) { + List<String> lstIqns = iqns != null ? 
new ArrayList<>(Arrays.asList(iqns)) : new ArrayList<String>(); + + if (iqnsToRemove != null) { + for (String iqnToRemove : iqnsToRemove) { + lstIqns.remove(iqnToRemove); + } } diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java b/server/src/com/cloud/capacity/CapacityManagerImpl.java index ac97bc96abf..13794c76361 100644 --- a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -981,6 +981,10 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { // TODO Auto-generated method stub @@ -993,6 +997,14 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { // TODO Auto-generated method stub diff --git a/server/src/com/cloud/capacity/ComputeCapacityListener.java b/server/src/com/cloud/capacity/ComputeCapacityListener.java index d8a3e6a8935..453960de5c8 100644 --- a/server/src/com/cloud/capacity/ComputeCapacityListener.java +++ b/server/src/com/cloud/capacity/ComputeCapacityListener.java @@ -55,6 +55,10 @@ public class ComputeCapacityListener implements Listener { return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host server, StartupCommand startup, boolean forRebalance) throws ConnectionException { if (!(startup instanceof StartupRoutingCommand)) { @@ -68,6 +72,14 @@ public class ComputeCapacityListener implements Listener { return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { return false; diff --git a/server/src/com/cloud/capacity/StorageCapacityListener.java b/server/src/com/cloud/capacity/StorageCapacityListener.java index f83cbd3a53d..e2d5a5d0aa4 100644 --- a/server/src/com/cloud/capacity/StorageCapacityListener.java +++ b/server/src/com/cloud/capacity/StorageCapacityListener.java @@ -58,6 +58,10 @@ public class StorageCapacityListener implements Listener { return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host server, StartupCommand startup, boolean forRebalance) throws ConnectionException { @@ -81,6 +85,14 @@ public class StorageCapacityListener implements Listener { return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { return false; diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyListener.java b/server/src/com/cloud/consoleproxy/ConsoleProxyListener.java index 2c1bbd67326..77c9d5a17ab 100644 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyListener.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyListener.java @@ -63,6 +63,10 @@ public class ConsoleProxyListener implements Listener { return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) { _proxyMgr.onAgentConnect(host, cmd); 
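The no-op overrides recurring through the server-side listeners above imply three new callbacks on the agent Listener contract. A sketch of those additions as inferred from the implementations in this patch (the interface name here is hypothetical; declaration order and placement in the real Listener interface are assumptions):

    // Inferred listener callbacks: fired after a host has been added to a
    // cluster, just before it is removed from its cluster, and after the
    // removal has completed.
    public interface HostLifecycleCallbacks {
        void processHostAdded(long hostId);

        void processHostAboutToBeRemoved(long hostId);

        void processHostRemoved(long hostId, long clusterId);
    }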
@@ -78,6 +82,14 @@ public class ConsoleProxyListener implements Listener { return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(long agentId, long seq) { return true; diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java index f5d2af92be5..2b3358aee8e 100644 --- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -899,6 +899,10 @@ StateListener { return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { if (!(cmd instanceof StartupRoutingCommand)) { @@ -920,6 +924,14 @@ StateListener { return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { // TODO Auto-generated method stub diff --git a/server/src/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index 48be8f28516..13a1a64cd21 100644 --- a/server/src/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -85,6 +85,10 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) { } @@ -95,6 +99,14 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { // TODO Auto-generated method stub diff --git a/server/src/com/cloud/network/NetworkUsageManagerImpl.java b/server/src/com/cloud/network/NetworkUsageManagerImpl.java index a0356ca90be..b7adecda0dd 100644 --- a/server/src/com/cloud/network/NetworkUsageManagerImpl.java +++ b/server/src/com/cloud/network/NetworkUsageManagerImpl.java @@ -483,6 +483,10 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage return true; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { if (cmd instanceof StartupTrafficMonitorCommand) { @@ -498,6 +502,14 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage return; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(long agentId, long seq) { return true; diff --git a/server/src/com/cloud/network/SshKeysDistriMonitor.java b/server/src/com/cloud/network/SshKeysDistriMonitor.java index 1eeb3a32131..e263c548cdc 100644 --- a/server/src/com/cloud/network/SshKeysDistriMonitor.java +++ b/server/src/com/cloud/network/SshKeysDistriMonitor.java @@ -40,12 +40,10 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; public class 
SshKeysDistriMonitor implements Listener { private static final Logger s_logger = Logger.getLogger(SshKeysDistriMonitor.class); AgentManager _agentMgr; - private final HostDao _hostDao; private ConfigurationDao _configDao; public SshKeysDistriMonitor(AgentManager mgr, HostDao host, ConfigurationDao config) { - this._agentMgr = mgr; - _hostDao = host; + _agentMgr = mgr; _configDao = config; } @@ -67,6 +65,18 @@ public class SshKeysDistriMonitor implements Listener { return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { if (cmd instanceof StartupRoutingCommand) { diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index ac8b86816a2..fc448a347a4 100644 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -251,159 +251,98 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V Configurable, StateListener { private static final Logger s_logger = Logger.getLogger(VirtualNetworkApplianceManagerImpl.class); - @Inject - EntityManager _entityMgr; - @Inject - DataCenterDao _dcDao = null; - @Inject - VlanDao _vlanDao = null; - @Inject - FirewallRulesDao _rulesDao = null; - @Inject - LoadBalancerDao _loadBalancerDao = null; - @Inject - LoadBalancerVMMapDao _loadBalancerVMMapDao = null; - @Inject - IPAddressDao _ipAddressDao = null; - @Inject - VMTemplateDao _templateDao = null; - @Inject - DomainRouterDao _routerDao = null; - @Inject - UserDao _userDao = null; - @Inject - UserStatisticsDao _userStatsDao = null; - @Inject - HostDao _hostDao = null; - @Inject - ConfigurationDao _configDao; - @Inject - HostPodDao _podDao = null; - @Inject - UserStatsLogDao _userStatsLogDao = null; - @Inject - AgentManager _agentMgr; - @Inject - AlertManager _alertMgr; - @Inject - AccountManager _accountMgr; - @Inject - ConfigurationManager _configMgr; - @Inject - ConfigurationServer _configServer; - @Inject - ServiceOfferingDao _serviceOfferingDao = null; - @Inject - UserVmDao _userVmDao; - @Inject - VMInstanceDao _vmDao; - @Inject - NetworkOfferingDao _networkOfferingDao = null; - @Inject - GuestOSDao _guestOSDao = null; - @Inject - NetworkOrchestrationService _networkMgr; - @Inject - NetworkModel _networkModel; - @Inject - VirtualMachineManager _itMgr; - @Inject - VpnUserDao _vpnUsersDao; - @Inject - RulesManager _rulesMgr; - @Inject - NetworkDao _networkDao; - @Inject - LoadBalancingRulesManager _lbMgr; - @Inject - PortForwardingRulesDao _pfRulesDao; - @Inject - RemoteAccessVpnDao _vpnDao; - @Inject - NicDao _nicDao; - @Inject - NicIpAliasDao _nicIpAliasDao; - @Inject - VolumeDao _volumeDao = null; - @Inject - UserVmDetailsDao _vmDetailsDao; - @Inject - ClusterDao _clusterDao; - @Inject - ResourceManager _resourceMgr; - @Inject - PhysicalNetworkServiceProviderDao _physicalProviderDao; - @Inject - VirtualRouterProviderDao _vrProviderDao; - @Inject - ManagementServerHostDao _msHostDao; - @Inject - Site2SiteCustomerGatewayDao _s2sCustomerGatewayDao; - @Inject - Site2SiteVpnGatewayDao _s2sVpnGatewayDao; - @Inject - Site2SiteVpnConnectionDao _s2sVpnConnectionDao; - @Inject - 
Site2SiteVpnManager _s2sVpnMgr; - @Inject - UserIpv6AddressDao _ipv6Dao; - @Inject - NetworkService _networkSvc; - @Inject - IpAddressManager _ipAddrMgr; - @Inject - ConfigDepot _configDepot; - @Inject - MonitoringServiceDao _monitorServiceDao; - @Inject - AsyncJobManager _asyncMgr; - @Inject - protected VpcDao _vpcDao; - @Inject - protected ApiAsyncJobDispatcher _asyncDispatcher; - @Inject - OpRouterMonitorServiceDao _opRouterMonitorServiceDao; + @Inject private EntityManager _entityMgr; + @Inject private DataCenterDao _dcDao; + @Inject protected VlanDao _vlanDao; + @Inject private FirewallRulesDao _rulesDao; + @Inject private LoadBalancerDao _loadBalancerDao; + @Inject private LoadBalancerVMMapDao _loadBalancerVMMapDao; + @Inject protected IPAddressDao _ipAddressDao; + @Inject private VMTemplateDao _templateDao; + @Inject protected DomainRouterDao _routerDao; + @Inject private UserDao _userDao; + @Inject protected UserStatisticsDao _userStatsDao; + @Inject private HostDao _hostDao; + @Inject private ConfigurationDao _configDao; + @Inject private HostPodDao _podDao; + @Inject private UserStatsLogDao _userStatsLogDao; + @Inject protected AgentManager _agentMgr; + @Inject private AlertManager _alertMgr; + @Inject private AccountManager _accountMgr; + @Inject private ConfigurationManager _configMgr; + @Inject private ConfigurationServer _configServer; + @Inject private ServiceOfferingDao _serviceOfferingDao; + @Inject private UserVmDao _userVmDao; + @Inject private VMInstanceDao _vmDao; + @Inject private NetworkOfferingDao _networkOfferingDao; + @Inject private GuestOSDao _guestOSDao; + @Inject private NetworkOrchestrationService _networkMgr; + @Inject protected NetworkModel _networkModel; + @Inject protected VirtualMachineManager _itMgr; + @Inject private VpnUserDao _vpnUsersDao; + @Inject private RulesManager _rulesMgr; + @Inject protected NetworkDao _networkDao; + @Inject private LoadBalancingRulesManager _lbMgr; + @Inject private PortForwardingRulesDao _pfRulesDao; + @Inject protected RemoteAccessVpnDao _vpnDao; + @Inject protected NicDao _nicDao; + @Inject private NicIpAliasDao _nicIpAliasDao; + @Inject private VolumeDao _volumeDao; + @Inject private UserVmDetailsDao _vmDetailsDao; + @Inject private ClusterDao _clusterDao; + @Inject private ResourceManager _resourceMgr; + @Inject private PhysicalNetworkServiceProviderDao _physicalProviderDao; + @Inject protected VirtualRouterProviderDao _vrProviderDao; + @Inject private ManagementServerHostDao _msHostDao; + @Inject private Site2SiteCustomerGatewayDao _s2sCustomerGatewayDao; + @Inject private Site2SiteVpnGatewayDao _s2sVpnGatewayDao; + @Inject private Site2SiteVpnConnectionDao _s2sVpnConnectionDao; + @Inject private Site2SiteVpnManager _s2sVpnMgr; + @Inject private UserIpv6AddressDao _ipv6Dao; + @Inject private NetworkService _networkSvc; + @Inject private IpAddressManager _ipAddrMgr; + @Inject private ConfigDepot _configDepot; + @Inject private MonitoringServiceDao _monitorServiceDao; + @Inject private AsyncJobManager _asyncMgr; + @Inject protected VpcDao _vpcDao; + @Inject protected ApiAsyncJobDispatcher _asyncDispatcher; + @Inject private OpRouterMonitorServiceDao _opRouterMonitorServiceDao; - @Inject - protected NetworkTopologyContext _networkTopologyContext; + @Inject protected NetworkTopologyContext _networkTopologyContext; @Autowired @Qualifier("networkHelper") protected NetworkHelper _nwHelper; - @Inject - protected RouterControlHelper _routerControlHelper; + @Inject protected RouterControlHelper _routerControlHelper; - @Inject - 
protected CommandSetupHelper _commandSetupHelper; - @Inject - protected RouterDeploymentDefinitionBuilder _routerDeploymentManagerBuilder; + @Inject protected CommandSetupHelper _commandSetupHelper; + @Inject protected RouterDeploymentDefinitionBuilder _routerDeploymentManagerBuilder; - int _routerRamSize; - int _routerCpuMHz; - int _retry = 2; - String _mgmtCidr; + private int _routerRamSize; + private int _routerCpuMHz; + private String _mgmtCidr; - int _routerStatsInterval = 300; - int _routerCheckInterval = 30; - int _rvrStatusUpdatePoolSize = 10; + private int _routerStatsInterval = 300; + private int _routerCheckInterval = 30; + private int _rvrStatusUpdatePoolSize = 10; private String _dnsBasicZoneUpdates = "all"; - private final Set<String> _guestOSNeedGatewayOnNonDefaultNetwork = new HashSet<String>(); + private final Set<String> _guestOSNeedGatewayOnNonDefaultNetwork = new HashSet<>(); private boolean _disableRpFilter = false; - int _routerExtraPublicNics = 2; + private int _routerExtraPublicNics = 2; private int _usageAggregationRange = 1440; private String _usageTimeZone = "GMT"; private final long mgmtSrvrId = MacAddress.getMacAddress().toLong(); private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds private boolean _dailyOrHourly = false; - ScheduledExecutorService _executor; - ScheduledExecutorService _checkExecutor; - ScheduledExecutorService _networkStatsUpdateExecutor; - ExecutorService _rvrStatusUpdateExecutor; + private ScheduledExecutorService _executor; + private ScheduledExecutorService _checkExecutor; + private ScheduledExecutorService _networkStatsUpdateExecutor; + private ExecutorService _rvrStatusUpdateExecutor; - BlockingQueue<Long> _vrUpdateQueue = null; + private BlockingQueue<Long> _vrUpdateQueue; @Override public VirtualRouter destroyRouter(final long routerId, final Account caller, final Long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { @@ -586,10 +525,7 @@ Configurable, StateListener final List<DomainRouterVO> routers = _routerDao.listIsolatedByHostId(host.getId()); @@ -2395,6 +2332,14 @@ Configurable, StateListener List<Long> affectedVms = new ArrayList<Long>(); - int commandNum = 0; + for (Answer ans : answers) { if (ans instanceof SecurityGroupRuleAnswer) { SecurityGroupRuleAnswer ruleAnswer = (SecurityGroupRuleAnswer)ans; @@ -106,7 +106,7 @@ public class SecurityGroupListener implements Listener { } } } - commandNum++; + if (_workTracker != null) _workTracker.processAnswers(agentId, seq, answers); } @@ -151,6 +151,10 @@ public class SecurityGroupListener implements Listener { return processed; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) { if (s_logger.isInfoEnabled()) @@ -188,6 +192,14 @@ public class SecurityGroupListener implements Listener { return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(long agentId, long seq) { if (_workTracker != null) { diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index 8341075b72e..fb34901d811 100644 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -771,6 +771,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, _hostTagsDao.persist(host.getId(), hostTags); } hosts.add(host); + + 
_agentMgr.notifyMonitorsOfNewlyAddedHost(host.getId()); + return hosts; } } @@ -843,10 +846,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return true; } + long clusterId = host.getClusterId(); + + _agentMgr.notifyMonitorsOfHostAboutToBeRemoved(host.getId()); + Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { - _dcDao.releasePrivateIpAddress(host.getPrivateIpAddress(), host.getDataCenterId(), null); _agentMgr.disconnectWithoutInvestigation(hostId, Status.Event.Remove); @@ -920,6 +926,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } }); + _agentMgr.notifyMonitorsOfRemovedHost(host.getId(), clusterId); + return true; } @@ -1570,17 +1578,35 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return true; } + private HostVO getNewHost(StartupCommand[] startupCommands) { + StartupCommand startupCommand = startupCommands[0]; + + HostVO host = findHostByGuid(startupCommand.getGuid()); + + if (host != null) { + return host; + } + + host = findHostByGuid(startupCommand.getGuidWithoutResource()); + + if (host != null) { + return host; + } + + return null; + } + protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource resource, final Map<String, String> details, List<String> hostTags, final ResourceStateAdapter.Event stateEvent) { - final StartupCommand startup = cmds[0]; - HostVO host = findHostByGuid(startup.getGuid()); - boolean isNew = false; - if (host == null) { - host = findHostByGuid(startup.getGuidWithoutResource()); - } + boolean newHost = false; + StartupCommand startup = cmds[0]; + + HostVO host = getNewHost(cmds); + if (host == null) { host = new HostVO(startup.getGuid()); - isNew = true; + + newHost = true; } String dataCenter = startup.getDataCenter(); @@ -1695,7 +1721,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new CloudRuntimeException("No resource state adapter response"); } - if (isNew) { + if (newHost) { host = _hostDao.persist(host); } else { _hostDao.update(host.getId(), host); } @@ -1794,9 +1820,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } + // find out if the host we want to connect to is new (so we can send an event) + boolean newHost = getNewHost(cmds) == null; + host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT); + if (host != null) { - created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance); + created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance, newHost); /* reload myself from database */ host = _hostDao.findById(host.getId()); } @@ -1866,12 +1896,19 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } host = null; + boolean newHost = false; + final GlobalLock addHostLock = GlobalLock.getInternLock("AddHostLock"); + try { if (addHostLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { // to safely determine first host in cluster in multi-MS scenario try { + // find out if the host we want to connect to is new (so we can send an event) + newHost = getNewHost(cmds) == null; + host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT); + if (host != null) { // if first host in cluster no need to defer agent creation deferAgentCreation = !isFirstHostInCluster(host); @@ -1886,7 
+1923,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (host != null) { if (!deferAgentCreation) { // if first host in cluster then - created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance); + created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance, newHost); host = _hostDao.findById(host.getId()); // reload } else { host = _hostDao.findById(host.getId()); // reload diff --git a/server/src/com/cloud/storage/ImageStoreUploadMonitorImpl.java b/server/src/com/cloud/storage/ImageStoreUploadMonitorImpl.java index ecdbcae255b..2d9f544bff5 100755 --- a/server/src/com/cloud/storage/ImageStoreUploadMonitorImpl.java +++ b/server/src/com/cloud/storage/ImageStoreUploadMonitorImpl.java @@ -152,6 +152,14 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean isRecurring() { return false; @@ -167,6 +175,10 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto return false; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { } diff --git a/server/src/com/cloud/storage/LocalStoragePoolListener.java b/server/src/com/cloud/storage/LocalStoragePoolListener.java index a66515ebdc9..941e505fa75 100644 --- a/server/src/com/cloud/storage/LocalStoragePoolListener.java +++ b/server/src/com/cloud/storage/LocalStoragePoolListener.java @@ -18,8 +18,6 @@ package com.cloud.storage; import javax.inject.Inject; -import org.apache.log4j.Logger; - import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import com.cloud.agent.Listener; @@ -39,17 +37,11 @@ import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.db.DB; public class LocalStoragePoolListener implements Listener { - private final static Logger s_logger = Logger.getLogger(LocalStoragePoolListener.class); - @Inject - PrimaryDataStoreDao _storagePoolDao; - @Inject - StoragePoolHostDao _storagePoolHostDao; - @Inject - CapacityDao _capacityDao; - @Inject - StorageManager _storageMgr; - @Inject - DataCenterDao _dcDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject private StoragePoolHostDao _storagePoolHostDao; + @Inject private CapacityDao _capacityDao; + @Inject private StorageManager _storageMgr; + @Inject private DataCenterDao _dcDao; @Override public int getTimeout() { @@ -71,6 +63,10 @@ public class LocalStoragePoolListener implements Listener { return false; } + @Override + public void processHostAdded(long hostId) { + } + @Override @DB public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { @@ -102,6 +98,14 @@ public class LocalStoragePoolListener implements Listener { return false; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(long agentId, long seq) { return false; diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 3d7146e1967..0509abc87c6 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ 
-458,7 +458,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); s_logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); - _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao), true, false, true); + _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _dataStoreProviderMgr), true, false, true); String value = _configDao.getValue(Config.StorageTemplateCleanupEnabled.key()); _templateCleanupEnabled = (value == null ? true : Boolean.parseBoolean(value)); diff --git a/server/src/com/cloud/storage/download/DownloadListener.java b/server/src/com/cloud/storage/download/DownloadListener.java index 814ce3c44be..51f9d42980c 100644 --- a/server/src/com/cloud/storage/download/DownloadListener.java +++ b/server/src/com/cloud/storage/download/DownloadListener.java @@ -270,9 +270,7 @@ public class DownloadListener implements Listener { } @Override - public boolean processDisconnect(long agentId, com.cloud.host.Status state) { - setDisconnected(); - return true; + public void processHostAdded(long hostId) { } @Override @@ -310,6 +308,20 @@ public class DownloadListener implements Listener { } } + @Override + public boolean processDisconnect(long agentId, com.cloud.host.Status state) { + setDisconnected(); + return true; + } + + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + public void setCommand(DownloadCommand cmd) { this._cmd = cmd; } diff --git a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java index ad65bd63b6e..dcbe4744adc 100644 --- a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java @@ -22,6 +22,10 @@ import javax.inject.Inject; import org.apache.log4j.Logger; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -46,13 +50,14 @@ public class StoragePoolMonitor implements Listener { private static final Logger s_logger = Logger.getLogger(StoragePoolMonitor.class); private final StorageManagerImpl _storageManager; private final PrimaryDataStoreDao _poolDao; + private DataStoreProviderManager _dataStoreProviderMgr; @Inject OCFS2Manager _ocfs2Mgr; - public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao) { - this._storageManager = mgr; - this._poolDao = poolDao; - + public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao, DataStoreProviderManager dataStoreProviderMgr) { + _storageManager = mgr; + _poolDao = poolDao; + _dataStoreProviderMgr = dataStoreProviderMgr; } @Override @@ -66,8 +71,25 @@ public class StoragePoolMonitor implements Listener { } @Override - public synchronized boolean processDisconnect(long agentId, Status state) { - return true; + public void processHostAdded(long hostId) { + List<DataStoreProvider> providers = 
_dataStoreProviderMgr.getProviders(); + + if (providers != null) { + for (DataStoreProvider provider : providers) { + if (provider instanceof PrimaryDataStoreProvider) { + try { + HypervisorHostListener hypervisorHostListener = provider.getHostListener(); + + if (hypervisorHostListener != null) { + hypervisorHostListener.hostAdded(hostId); + } + } + catch (Exception ex) { + s_logger.error("hostAdded(long) failed for storage provider " + provider.getName(), ex); + } + } + } + } } @Override @@ -111,6 +133,55 @@ } } + @Override + public synchronized boolean processDisconnect(long agentId, Status state) { + return true; + } + + @Override + public void processHostAboutToBeRemoved(long hostId) { + List<DataStoreProvider> providers = _dataStoreProviderMgr.getProviders(); + + if (providers != null) { + for (DataStoreProvider provider : providers) { + if (provider instanceof PrimaryDataStoreProvider) { + try { + HypervisorHostListener hypervisorHostListener = provider.getHostListener(); + + if (hypervisorHostListener != null) { + hypervisorHostListener.hostAboutToBeRemoved(hostId); + } + } + catch (Exception ex) { + s_logger.error("hostAboutToBeRemoved(long) failed for storage provider " + provider.getName(), ex); + } + } + } + } + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + List<DataStoreProvider> providers = _dataStoreProviderMgr.getProviders(); + + if (providers != null) { + for (DataStoreProvider provider : providers) { + if (provider instanceof PrimaryDataStoreProvider) { + try { + HypervisorHostListener hypervisorHostListener = provider.getHostListener(); + + if (hypervisorHostListener != null) { + hypervisorHostListener.hostRemoved(hostId, clusterId); + } + } + catch (Exception ex) { + s_logger.error("hostRemoved(long, long) failed for storage provider " + provider.getName(), ex); + } + } + } + } + } + @Override public boolean processCommands(long agentId, long seq, Command[] req) { return false; diff --git a/server/src/com/cloud/storage/listener/StorageSyncListener.java b/server/src/com/cloud/storage/listener/StorageSyncListener.java index fe15d30b95a..eeef434367e 100644 --- a/server/src/com/cloud/storage/listener/StorageSyncListener.java +++ b/server/src/com/cloud/storage/listener/StorageSyncListener.java @@ -50,6 +50,10 @@ public class StorageSyncListener implements Listener { return true; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { } @@ -60,6 +64,14 @@ public class StorageSyncListener implements Listener { return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processCommands(long agentId, long seq, Command[] request) { return false; diff --git a/server/src/com/cloud/storage/secondary/SecondaryStorageListener.java b/server/src/com/cloud/storage/secondary/SecondaryStorageListener.java index 43613e7f9ca..b78a548729c 100644 --- a/server/src/com/cloud/storage/secondary/SecondaryStorageListener.java +++ b/server/src/com/cloud/storage/secondary/SecondaryStorageListener.java @@ -65,6 +65,10 @@ public class SecondaryStorageListener implements Listener { return null; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { if ((cmd instanceof StartupStorageCommand)) { @@ -91,6 +95,14 @@ 
public class SecondaryStorageListener implements Listener { return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(long agentId, long seq) { return true; diff --git a/server/src/com/cloud/storage/upload/UploadListener.java b/server/src/com/cloud/storage/upload/UploadListener.java index f6ad815d007..838676ef0fe 100644 --- a/server/src/com/cloud/storage/upload/UploadListener.java +++ b/server/src/com/cloud/storage/upload/UploadListener.java @@ -113,8 +113,6 @@ public class UploadListener implements Listener { private DataStore sserver; - private boolean uploadActive = true; - private UploadDao uploadDao; private final UploadMonitorImpl uploadMonitor; @@ -250,6 +248,10 @@ public class UploadListener implements Listener { return false; } + @Override + public void processHostAdded(long hostId) { + } + @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { if (!(cmd instanceof StartupStorageCommand)) { @@ -270,7 +272,6 @@ public class UploadListener implements Listener { } public void setUploadInactive(Status reason) { - uploadActive = false; uploadMonitor.handleUploadEvent(accountId, typeName, type, uploadId, reason, eventId); } @@ -294,6 +295,14 @@ public class UploadListener implements Listener { return true; } + @Override + public void processHostAboutToBeRemoved(long hostId) { + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + } + @Override public boolean processTimeout(long agentId, long seq) { return true; diff --git a/test/integration/plugins/solidfire/TestAddRemoveHosts.py b/test/integration/plugins/solidfire/TestAddRemoveHosts.py new file mode 100644 index 00000000000..518d022caec --- /dev/null +++ b/test/integration/plugins/solidfire/TestAddRemoveHosts.py @@ -0,0 +1,710 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +import random +import SignedAPICall +import time +import XenAPI + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +# Import Integration Libraries + +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, ServiceOffering, User, Host, StoragePool, VirtualMachine + +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_hosts, list_clusters, list_volumes + +# utils - utility classes for common cleanup, external library wrappers, etc. 
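The integration test below exercises the new host-lifecycle callbacks end to end: adding a XenServer host through the CloudStack API should fire processHostAdded(), which lets the SolidFire plug-in place the host's IQN into the storage pool's volume access group (VAG). As a roadmap for the test code that follows, here is a condensed sketch of that add-and-verify step; the helper name and its parameters are illustrative, not part of the patch, but Host.create() and list_volume_access_groups() are the same calls the test itself uses:

    from marvin.lib.base import Host

    def add_host_and_verify_iqn(api_client, cluster, host_data, sf_client, sf_vag_id, host_iqn):
        # Adding the host triggers Listener.processHostAdded() on the management
        # server, which the SolidFire plug-in uses to add the host's IQN to the VAG.
        host = Host.create(api_client, cluster, host_data, hypervisor="XenServer")

        sf_vag = sf_client.list_volume_access_groups(sf_vag_id, 1)["volumeAccessGroups"][0]

        assert host_iqn in sf_vag["initiators"], "The host's IQN was not added to the VAG."

        return host
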
+from marvin.lib.utils import cleanup_resources + +from solidfire import solidfire_element_api as sf_api + + +class TestData: + account = "account" + capacityBytes = "capacitybytes" + capacityIops = "capacityiops" + clusterId = "clusterId" + computeOffering = "computeoffering" + displayText = "displaytext" + diskSize = "disksize" + domainId = "domainId" + hypervisor = "hypervisor" + login = "login" + mvip = "mvip" + name = "name" + newHost = "newHost" + newHostDisplayName = "newHostDisplayName" + osType = "ostype" + password = "password" + podId = "podid" + port = "port" + primaryStorage = "primarystorage" + primaryStorage2 = "primarystorage2" + provider = "provider" + scope = "scope" + solidFire = "solidfire" + storageTag = "SolidFire_SAN_1" + storageTag2 = "SolidFire_Volume_1" + tags = "tags" + url = "url" + urlOfNewHost = "urlOfNewHost" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + volume_1 = "volume_1" + xenServer = "xenserver" + zoneId = "zoneid" + + def __init__(self): + self.testdata = { + TestData.solidFire: { + TestData.mvip: "192.168.139.112", + TestData.login: "admin", + TestData.password: "admin", + TestData.port: 443, + TestData.url: "https://192.168.139.112:443" + }, + TestData.xenServer: { + TestData.username: "root", + TestData.password: "solidfire" + }, + TestData.urlOfNewHost: "https://192.168.129.243", + TestData.account: { + "email": "test@test.com", + "firstname": "John", + "lastname": "Doe", + TestData.username: "test", + TestData.password: "test" + }, + TestData.user: { + "email": "user@test.com", + "firstname": "Jane", + "lastname": "Doe", + TestData.username: "testuser", + TestData.password: "password" + }, + TestData.newHost: { + TestData.username: "root", + TestData.password: "solidfire", + TestData.url: "http://192.168.129.243", + TestData.podId : "1", + TestData.zoneId: "1" + }, + TestData.primaryStorage: { + TestData.name: "SolidFire-%d" % random.randint(0, 100), + TestData.scope: "ZONE", + TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", + TestData.provider: "SolidFire", + TestData.tags: TestData.storageTag, + TestData.capacityIops: 4500000, + TestData.capacityBytes: 2251799813685248, + TestData.hypervisor: "Any" + }, + TestData.primaryStorage2: { + TestData.name: "SolidFireShared-%d" % random.randint(0, 100), + TestData.scope: "CLUSTER", + TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + + "minIops=5000;maxIops=50000;burstIops=75000", + TestData.provider: "SolidFireShared", + TestData.tags: TestData.storageTag2, + TestData.capacityIops: 5000, + TestData.capacityBytes: 1099511627776, + TestData.hypervisor: "XenServer", + TestData.podId: 1 + }, + TestData.virtualMachine: { + TestData.name: "TestVM", + "displayname": "Test VM" + }, + TestData.computeOffering: { + TestData.name: "SF_CO_1", + TestData.displayText: "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": False, + "miniops": "10000", + "maxiops": "15000", + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag + }, + TestData.volume_1: { + "diskname": "testvolume", + }, + "volume2": { + "diskname": "testvolume2", + }, + TestData.newHostDisplayName: "XenServer-6.5-3", + TestData.osType: "CentOS 5.6(64-bit) no GUI 
(XenServer)", + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "192.168.129.50" + } + + +class TestAddRemoveHosts(cloudstackTestCase): + _vag_id_should_be_non_zero_int_err_msg = "The SolidFire VAG ID should be a non-zero integer." + _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer." + + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestAddRemoveHosts, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.dbConnection = testclient.getDbConnection() + + cls.testdata = TestData().testdata + + cls.xs_pool_master_ip = list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress + + # Set up XenAPI connection + host_ip = "https://" + cls.xs_pool_master_ip + + cls.xen_session = XenAPI.Session(host_ip) + + xenserver = cls.testdata[TestData.xenServer] + + cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) + + # Set up SolidFire connection + cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + + # Get Resources from Cloud Infrastructure + cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) + cls.cluster = list_clusters(cls.apiClient)[0] + cls.template = get_template(cls.apiClient, cls.zone.id, cls.testdata[TestData.osType]) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata[TestData.account], + admin=1 + ) + + # Set up connection to make customized API calls + user = User.create( + cls.apiClient, + cls.testdata[TestData.user], + account=cls.account.name, + domainid=cls.domain.id + ) + + url = cls.testdata[TestData.url] + + api_url = "http://" + url + ":8080/client/api" + userkeys = User.registerUserKeys(cls.apiClient, user.id) + + cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey) + + cls.compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + cls._cleanup = [ + cls.compute_offering, + user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiClient, cls._cleanup) + + cls._purge_solidfire_volumes() + except Exception as e: + logging.debug("Exception in tearDownClass(cls): %s" % e) + + def setUp(self): + self.virtual_machine = None + + self.cleanup = [] + + def tearDown(self): + try: + if self.virtual_machine is not None: + self.virtual_machine.delete(self.apiClient, True) + + cleanup_resources(self.apiClient, self.cleanup) + except Exception as e: + logging.debug("Exception in tearDown(self): %s" % e) + + def test_add_remove_host_with_solidfire_plugin_1(self): + primarystorage = self.testdata[TestData.primaryStorage] + + primary_storage = StoragePool.create( + self.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=self.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + capacityiops=primarystorage[TestData.capacityIops], + capacitybytes=primarystorage[TestData.capacityBytes], + hypervisor=primarystorage[TestData.hypervisor] + ) + + self.cleanup.append(primary_storage) + + self.virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + 
templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + root_volume = self._get_root_volume(self.virtual_machine) + + sf_iscsi_name = self._get_iqn(root_volume) + + self._perform_add_remove_host(primary_storage.id, sf_iscsi_name) + + def test_add_remove_host_with_solidfire_plugin_2(self): + primarystorage2 = self.testdata[TestData.primaryStorage2] + + primary_storage_2 = StoragePool.create( + self.apiClient, + primarystorage2, + scope=primarystorage2[TestData.scope], + zoneid=self.zone.id, + clusterid=self.cluster.id, + provider=primarystorage2[TestData.provider], + tags=primarystorage2[TestData.tags], + capacityiops=primarystorage2[TestData.capacityIops], + capacitybytes=primarystorage2[TestData.capacityBytes], + hypervisor=primarystorage2[TestData.hypervisor] + ) + + self.cleanup.append(primary_storage_2) + + sf_iscsi_name = self._get_iqn_2(primary_storage_2) + + self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name) + + def test_add_remove_host_with_solidfire_plugin_3(self): + primarystorage = self.testdata[TestData.primaryStorage] + + primary_storage = StoragePool.create( + self.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=self.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + capacityiops=primarystorage[TestData.capacityIops], + capacitybytes=primarystorage[TestData.capacityBytes], + hypervisor=primarystorage[TestData.hypervisor] + ) + + self.cleanup.append(primary_storage) + + self.virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + root_volume = self._get_root_volume(self.virtual_machine) + + sf_iscsi_name = self._get_iqn(root_volume) + + primarystorage2 = self.testdata[TestData.primaryStorage2] + + primary_storage_2 = StoragePool.create( + self.apiClient, + primarystorage2, + scope=primarystorage2[TestData.scope], + zoneid=self.zone.id, + clusterid=self.cluster.id, + provider=primarystorage2[TestData.provider], + tags=primarystorage2[TestData.tags], + capacityiops=primarystorage2[TestData.capacityIops], + capacitybytes=primarystorage2[TestData.capacityBytes], + hypervisor=primarystorage2[TestData.hypervisor] + ) + + self.cleanup.append(primary_storage_2) + + self._perform_add_remove_host(primary_storage.id, sf_iscsi_name) + + def test_add_remove_host_with_solidfire_plugin_4(self): + primarystorage2 = self.testdata[TestData.primaryStorage2] + + primary_storage_2 = StoragePool.create( + self.apiClient, + primarystorage2, + scope=primarystorage2[TestData.scope], + zoneid=self.zone.id, + clusterid=self.cluster.id, + provider=primarystorage2[TestData.provider], + tags=primarystorage2[TestData.tags], + capacityiops=primarystorage2[TestData.capacityIops], + capacitybytes=primarystorage2[TestData.capacityBytes], + hypervisor=primarystorage2[TestData.hypervisor] + ) + + self.cleanup.append(primary_storage_2) + + sf_iscsi_name = self._get_iqn_2(primary_storage_2) + + primarystorage = self.testdata[TestData.primaryStorage] + + primary_storage = StoragePool.create( + self.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=self.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + capacityiops=primarystorage[TestData.capacityIops], + capacitybytes=primarystorage[TestData.capacityBytes], + 
hypervisor=primarystorage[TestData.hypervisor] + ) + + self.cleanup.append(primary_storage) + + self.virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name) + + def _perform_add_remove_host(self, primary_storage_id, sf_iscsi_name): + xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)[0] + + pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr) + + self._verify_all_pbds_attached(pbds) + + num_pbds = len(pbds) + + sf_vag_id = self._get_sf_vag_id(self.cluster.id, primary_storage_id) + + host_iscsi_iqns = self._get_host_iscsi_iqns() + + sf_vag = self._get_sf_vag(sf_vag_id) + + sf_vag_initiators = self._get_sf_vag_initiators(sf_vag) + + self._verifyVag(host_iscsi_iqns, sf_vag_initiators) + + sf_vag_initiators_len_orig = len(sf_vag_initiators) + + xen_session = XenAPI.Session(self.testdata[TestData.urlOfNewHost]) + + xenserver = self.testdata[TestData.xenServer] + + xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) + + xen_session.xenapi.pool.join(self.xs_pool_master_ip, xenserver[TestData.username], xenserver[TestData.password]) + + time.sleep(60) + + pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr) + + self.assertEqual( + len(pbds), + num_pbds + 1, + "'len(pbds)' is not equal to 'num_pbds + 1'." + ) + + num_pbds = num_pbds + 1 + + num_pbds_not_attached = 0 + + for pbd in pbds: + pbd_record = self.xen_session.xenapi.PBD.get_record(pbd) + + if not pbd_record["currently_attached"]: + num_pbds_not_attached = num_pbds_not_attached + 1 + + self.assertEqual( + num_pbds_not_attached, + 1, + "'num_pbds_not_attached' is not equal to 1." + ) + + host = Host.create( + self.apiClient, + self.cluster, + self.testdata[TestData.newHost], + hypervisor="XenServer" + ) + + self.assertTrue( + isinstance(host, Host), + "'host' is not a 'Host'." + ) + + pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr) + + self.assertEqual( + len(pbds), + num_pbds, + "'len(pbds)' is not equal to 'num_pbds'." + ) + + self._verify_all_pbds_attached(pbds) + + host_iscsi_iqns = self._get_host_iscsi_iqns() + + sf_vag = self._get_sf_vag(sf_vag_id) + + sf_vag_initiators = self._get_sf_vag_initiators(sf_vag) + + self._verifyVag(host_iscsi_iqns, sf_vag_initiators) + + sf_vag_initiators_len_new = len(sf_vag_initiators) + + self.assertEqual( + sf_vag_initiators_len_new, + sf_vag_initiators_len_orig + 1, + "'sf_vag_initiators_len_new' is not equal to 'sf_vag_initiators_len_orig + 1'." + ) + + host.delete(self.apiClient) + + pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr) + + self.assertEqual( + len(pbds), + num_pbds, + "'len(pbds)' is not equal to 'num_pbds'." + ) + + self._verify_all_pbds_attached(pbds) + + host_iscsi_iqns = self._get_host_iscsi_iqns() + + sf_vag = self._get_sf_vag(sf_vag_id) + + sf_vag_initiators = self._get_sf_vag_initiators(sf_vag) + + self.assertEqual( + len(host_iscsi_iqns) - 1, + len(sf_vag_initiators), + "'len(host_iscsi_iqns) - 1' is not equal to 'len(sf_vag_initiators)'." 
+ ) + + host_ref = self.xen_session.xenapi.host.get_by_name_label(self.testdata[TestData.newHostDisplayName])[0] + + self.xen_session.xenapi.pool.eject(host_ref) + + time.sleep(120) + + pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr) + + self.assertEqual( + len(pbds), + num_pbds - 1, + "'len(pbds)' is not equal to 'num_pbds - 1'." + ) + + self._verify_all_pbds_attached(pbds) + + host_iscsi_iqns = self._get_host_iscsi_iqns() + + sf_vag = self._get_sf_vag(sf_vag_id) + + sf_vag_initiators = self._get_sf_vag_initiators(sf_vag) + + self._verifyVag(host_iscsi_iqns, sf_vag_initiators) + + sf_vag_initiators_len_new = len(sf_vag_initiators) + + self.assertEqual( + sf_vag_initiators_len_new, + sf_vag_initiators_len_orig, + "'sf_vag_initiators_len_new' is not equal to 'sf_vag_initiators_len_orig'." + ) + + def _verify_all_pbds_attached(self, pbds): + for pbd in pbds: + pbd_record = self.xen_session.xenapi.PBD.get_record(pbd) + + self.assertEqual( + pbd_record["currently_attached"], + True, + "Not all PBDs are currently attached." + ) + + def _get_root_volume(self, vm): + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=vm.id, + listall=True + ) + + self.assertNotEqual( + list_volumes_response, + None, + "'list_volumes_response' should not be equal to 'None'." + ) + + self.assertEqual( + len(list_volumes_response) > 0, + True, + "'len(list_volumes_response)' should be greater than 0." + ) + + for volume in list_volumes_response: + if volume.type.upper() == "ROOT": + return volume + + self.fail("Unable to locate the ROOT volume of the VM with the following ID: " + str(vm.id)) + + def _get_iqn(self, volume): + # Get volume IQN + sf_iscsi_name_request = {'volumeid': volume.id} + # put this commented line back once PR 1403 is in + # sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(sf_iscsi_name_request) + sf_iscsi_name_result = self.cs_api.getSolidFireVolumeIscsiName(sf_iscsi_name_request) + # sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName'] + sf_iscsi_name = sf_iscsi_name_result['apisolidfirevolumeiscsiname']['solidFireVolumeIscsiName'] + + self._check_iscsi_name(sf_iscsi_name) + + return sf_iscsi_name + + def _get_iqn_2(self, primary_storage): + sql_query = "Select path From storage_pool Where uuid = '" + str(primary_storage.id) + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + sql_result = self.dbConnection.execute(sql_query) + + return sql_result[0][0] + + def _check_iscsi_name(self, sf_iscsi_name): + self.assertEqual( + sf_iscsi_name[0], + "/", + "The iSCSI name needs to start with a forward slash." + ) + + def _get_host_iscsi_iqns(self): + hosts = self.xen_session.xenapi.host.get_all() + + self.assertEqual( + isinstance(hosts, list), + True, + "'hosts' is not a list." 
+ ) + + host_iscsi_iqns = [] + + for host in hosts: + host_iscsi_iqns.append(self._get_host_iscsi_iqn(host)) + + return host_iscsi_iqns + + def _get_host_iscsi_iqn(self, host): + other_config = self.xen_session.xenapi.host.get_other_config(host) + + return other_config["iscsi_iqn"] + + def _get_sf_vag_id(self, cluster_id, primary_storage_id): + # Get SF Volume Access Group ID + sf_vag_id_request = {'clusterid': cluster_id, 'storageid': primary_storage_id} + sf_vag_id_result = self.cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request) + sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId'] + + self.assertEqual( + isinstance(sf_vag_id, int), + True, + TestAddRemoveHosts._vag_id_should_be_non_zero_int_err_msg + ) + + return sf_vag_id + + def _get_sf_vag(self, sf_vag_id): + return self.sf_client.list_volume_access_groups(sf_vag_id, 1)["volumeAccessGroups"][0] + + def _get_sf_vag_initiators(self, sf_vag): + return sf_vag["initiators"] + + def _verifyVag(self, host_iscsi_iqns, sf_vag_initiators): + self.assertEqual( + isinstance(host_iscsi_iqns, list), + True, + "'host_iscsi_iqns' is not a list." + ) + + self.assertEqual( + isinstance(sf_vag_initiators, list), + True, + "'sf_vag_initiators' is not a list." + ) + + self.assertEqual( + len(host_iscsi_iqns), + len(sf_vag_initiators), + "Lists are not the same size." + ) + + for host_iscsi_iqn in host_iscsi_iqns: + # an error should occur if host_iscsi_iqn is not in sf_vag_initiators + sf_vag_initiators.index(host_iscsi_iqn) + + def _check_list(self, in_list, expected_size_of_list, err_msg): + self.assertEqual( + isinstance(in_list, list), + True, + "'in_list' is not a list." + ) + + self.assertEqual( + len(in_list), + expected_size_of_list, + err_msg + ) + + @classmethod + def _purge_solidfire_volumes(cls): + deleted_volumes = cls.sf_client.list_deleted_volumes() + + for deleted_volume in deleted_volumes: + cls.sf_client.purge_deleted_volume(deleted_volume['volumeID']) + diff --git a/test/integration/plugins/solidfire/TestSnapshots.py b/test/integration/plugins/solidfire/TestSnapshots.py new file mode 100644 index 00000000000..9c3d25580c0 --- /dev/null +++ b/test/integration/plugins/solidfire/TestSnapshots.py @@ -0,0 +1,1472 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
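The _perform_add_remove_host() flow above also verifies the reverse path: deleting the host fires processHostAboutToBeRemoved() and then processHostRemoved(), after which the SolidFire plug-in must have taken the host's IQN back out of the VAG. A condensed sketch of that removal check (helper name and parameters are again illustrative, not part of the patch):

    def remove_host_and_verify_iqn(api_client, host, sf_client, sf_vag_id, host_iqn):
        # Deleting the host triggers processHostAboutToBeRemoved() and then
        # processHostRemoved(); afterwards the SolidFire plug-in should have
        # removed the host's IQN from the volume access group.
        host.delete(api_client)

        sf_vag = sf_client.list_volume_access_groups(sf_vag_id, 1)["volumeAccessGroups"][0]

        assert host_iqn not in sf_vag["initiators"], "The host's IQN is still in the VAG."
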
+ +import logging +import random +import SignedAPICall +import time +import XenAPI + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +from nose.plugins.attrib import attr + +# Import Integration Libraries + +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, VirtualMachine, Volume + +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes + +# utils - utility classes for common cleanup, external library wrappers, etc. +from marvin.lib.utils import cleanup_resources + +from solidfire import solidfire_element_api as sf_api + +# on April 10, 2016: Ran 3 tests in 7742.481s with three hosts +# on May 2, 2016: Ran 3 tests in 7409.770s with two hosts + + +class TestData(): + account = "account" + capacityBytes = "capacitybytes" + capacityIops = "capacityiops" + clusterId = "clusterId" + computeOffering = "computeoffering" + diskName = "diskname" + diskOffering = "diskoffering" + domainId = "domainId" + hypervisor = "hypervisor" + login = "login" + mvip = "mvip" + password = "password" + port = "port" + primaryStorage = "primarystorage" + provider = "provider" + scope = "scope" + solidFire = "solidfire" + storageTag = "SolidFire_SAN_1" + tags = "tags" + templateName = "templatename" + url = "url" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + volume_1 = "volume_1" + volume_2 = "volume_2" + xenServer = "xenserver" + zoneId = "zoneId" + + def __init__(self): + self.testdata = { + TestData.solidFire: { + TestData.mvip: "192.168.139.112", + TestData.login: "admin", + TestData.password: "admin", + TestData.port: 443, + TestData.url: "https://192.168.139.112:443" + }, + TestData.xenServer: { + TestData.username: "root", + TestData.password: "solidfire" + }, + TestData.account: { + "email": "test@test.com", + "firstname": "John", + "lastname": "Doe", + "username": "test", + "password": "test" + }, + "testaccount": { + "email": "test2@test2.com", + "firstname": "Jane", + "lastname": "Doe", + TestData.username: "test2", + TestData.password: "test" + }, + TestData.user: { + "email": "user@test.com", + "firstname": "Jane", + "lastname": "Doe", + TestData.username: "testuser", + TestData.password: "password" + }, + TestData.primaryStorage: { + "name": "SolidFire-%d" % random.randint(0, 100), + TestData.scope: "ZONE", + "url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", + TestData.provider: "SolidFire", + TestData.tags: TestData.storageTag, + TestData.capacityIops: 4500000, + TestData.capacityBytes: 2251799813685248, + TestData.hypervisor: "Any" + }, + TestData.virtualMachine: { + "name": "TestVM", + "displayname": "Test VM" + }, + TestData.computeOffering: { + "name": "SF_CO_1", + "displaytext": "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": False, + "miniops": "10000", + "maxiops": "15000", + "hypervisorsnapshotreserve": 200, + "tags": "SolidFire_SAN_1" + }, + TestData.diskOffering: { + "name": "SF_DO_1", + "displaytext": "SF_DO_1 (Min IOPS = 300; Max IOPS = 500)", + "disksize": 128, + 
"customizediops": False, + "miniops": 300, + "maxiops": 500, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "testdiskofferings": { + "customiopsdo": { + "name": "SF_Custom_Iops_DO", + "displaytext": "Customized Iops DO", + "disksize": 128, + "customizediops": True, + "miniops": 500, + "maxiops": 1000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "customsizedo": { + "name": "SF_Custom_Size_DO", + "displaytext": "Customized Size DO", + "disksize": 175, + "customizediops": False, + "miniops": 500, + "maxiops": 1000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "customsizeandiopsdo": { + "name": "SF_Custom_Iops_Size_DO", + "displaytext": "Customized Size and Iops DO", + "disksize": 200, + "customizediops": True, + "miniops": 400, + "maxiops": 800, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newiopsdo": { + "name": "SF_New_Iops_DO", + "displaytext": "New Iops (min=350, max = 700)", + "disksize": 128, + "miniops": 350, + "maxiops": 700, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newsizedo": { + "name": "SF_New_Size_DO", + "displaytext": "New Size: 175", + "disksize": 175, + "miniops": 400, + "maxiops": 800, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newsizeandiopsdo": { + "name": "SF_New_Size_Iops_DO", + "displaytext": "New Size and Iops", + "disksize": 200, + "miniops": 200, + "maxiops": 400, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + }, + TestData.volume_1: { + TestData.diskName: "test-volume", + }, + TestData.volume_2: { + TestData.diskName: "test-volume-2", + }, + TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)", + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "192.168.129.50" + } + + +class TestSnapshots(cloudstackTestCase): + _should_be_zero_volume_access_groups_in_list_err_msg = "There shouldn't be any volume access groups in this list." + _should_be_zero_snapshots_in_list_err_msg = "There shouldn't be any snapshots in this list." + _should_only_be_one_snapshot_in_list_err_msg = "There should only be one snapshot in this list." + _should_be_two_snapshots_in_list_err_msg = "There should be two snapshots in this list." + _should_be_three_snapshots_in_list_err_msg = "There should be three snapshots in this list." + _should_be_zero_volumes_in_list_err_msg = "There shouldn't be any volumes in this list." + _should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list." + _should_be_two_volumes_in_list_err_msg = "There should be two volumes in this list." + _should_be_three_volumes_in_list_err_msg = "There should be three volumes in this list." + _should_be_four_volumes_in_list_err_msg = "There should be four volumes in this list." + _should_be_five_volumes_in_list_err_msg = "There should be five volumes in this list." + _should_be_six_volumes_in_list_err_msg = "There should be six volumes in this list." + _should_be_seven_volumes_in_list_err_msg = "There should be seven volumes in this list." + _should_be_five_items_in_list_err_msg = "There should be five items in this list." + _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer." 
+ + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestSnapshots, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.dbConnection = testclient.getDbConnection() + + cls.testdata = TestData().testdata + + # Set up xenAPI connection + host_ip = "https://" + \ + list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress + + # Set up XenAPI connection + cls.xen_session = XenAPI.Session(host_ip) + + xenserver = cls.testdata[TestData.xenServer] + + cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) + + # Set up SolidFire connection + cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + + # Get Resources from Cloud Infrastructure + cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) + cls.cluster = list_clusters(cls.apiClient)[0] + cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata["account"], + admin=1 + ) + + # Set up connection to make customized API calls + cls.user = User.create( + cls.apiClient, + cls.testdata["user"], + account=cls.account.name, + domainid=cls.domain.id + ) + + url = cls.testdata[TestData.url] + + api_url = "http://" + url + ":8080/client/api" + userkeys = User.registerUserKeys(cls.apiClient, cls.user.id) + + cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey) + + primarystorage = cls.testdata[TestData.primaryStorage] + + cls.primary_storage = StoragePool.create( + cls.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=cls.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + capacityiops=primarystorage[TestData.capacityIops], + capacitybytes=primarystorage[TestData.capacityBytes], + hypervisor=primarystorage[TestData.hypervisor] + ) + + cls.compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + cls.disk_offering = DiskOffering.create( + cls.apiClient, + cls.testdata[TestData.diskOffering] + ) + + # Resources that are to be destroyed + cls._cleanup = [ + cls.compute_offering, + cls.disk_offering, + cls.user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiClient, cls._cleanup) + + cls.primary_storage.delete(cls.apiClient) + + cls._purge_solidfire_volumes() + except Exception as e: + logging.debug("Exception in tearDownClass(cls): %s" % e) + + def setUp(self): + self.cleanup = [] + + def tearDown(self): + cleanup_resources(self.apiClient, self.cleanup) + + @attr(hypervisor='XenServer') + def test_01_create_volume_snapshot_using_sf_snapshot(self): + self._set_supports_resign(True) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + vm_1_root_volume_name = 
vm_1_root_volume.name + + sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + + self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg) + + self._delete_and_test_snapshot(vol_snap_2) + + self._delete_and_test_snapshot(vol_snap_1) + + self._delete_and_test_snapshot(vol_snap_3) + + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + virtual_machine.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + + self._delete_and_test_snapshot(vol_snap_1) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + + self._delete_and_test_snapshot(vol_snap_2) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + vm_1_root_volume_name = vm_1_root_volume.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + sf_volume = 
self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + + self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg) + + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"} + + template = Template.create_from_snapshot(self.apiClient, vol_snap_2, services) + + self.cleanup.append(template) + + virtual_machine_dict = {"name": "TestVM2", "displayname": "Test VM 2"} + + virtual_machine_2 = VirtualMachine.create( + self.apiClient, + virtual_machine_dict, + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine_2.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_2_root_volume = list_volumes_response[0] + vm_2_root_volume_name = vm_2_root_volume.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + + self._check_list(sf_snapshots_2, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + vol_snap_a = self._create_and_test_snapshot(vm_2_root_volume.id, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + + volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a.id, services, account=self.account.name, domainid=self.domain.id) + + volume_created_from_snapshot_name = volume_created_from_snapshot.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + + self._check_list(sf_volume_3['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + + volume_created_from_snapshot = virtual_machine.attach_volume( + self.apiClient, + volume_created_from_snapshot + ) + + self._delete_and_test_snapshot(vol_snap_a) + + 
virtual_machine.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + + volume_created_from_snapshot = virtual_machine_2.attach_volume( + self.apiClient, + volume_created_from_snapshot + ) + + self._delete_and_test_snapshot(vol_snap_2) + + self._delete_and_test_snapshot(vol_snap_3) + + self._delete_and_test_snapshot(vol_snap_1) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + + virtual_machine_2.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + + list_volumes_response = list_volumes( + self.apiClient, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + data_volume = list_volumes_response[0] + + data_volume = Volume(data_volume.__dict__) + + data_volume.delete(self.apiClient) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg) + + @attr(hypervisor='XenServer') + def test_02_create_volume_snapshot_using_sf_volume(self): + self._set_supports_resign(False) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + vm_1_root_volume_name = vm_1_root_volume.name + + sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + + self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + sf_volume_id = 
sf_volume['volumeID'] + sf_volume_size = sf_volume['totalSize'] + + vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, + sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 2, primary_storage_db_id, sf_volume_size, + sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + vol_snap_3 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 3, primary_storage_db_id, sf_volume_size, + sf_account_id, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_2, sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_3, sf_account_id, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 4, primary_storage_db_id, sf_volume_size, + sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 5, primary_storage_db_id, sf_volume_size, + sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + virtual_machine.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_2, sf_account_id, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + vm_1_root_volume_name = vm_1_root_volume.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + + self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + sf_volume_id = sf_volume['volumeID'] + sf_volume_size = sf_volume['totalSize'] + + vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, + sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot_2(vm_1_root_volume.id, 
sf_volume_id, sf_volume_id + 2, primary_storage_db_id, sf_volume_size, + sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + vol_snap_3 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 3, primary_storage_db_id, sf_volume_size, + sf_account_id, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg) + + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"} + + template = Template.create_from_snapshot(self.apiClient, vol_snap_2, services) + + self.cleanup.append(template) + + virtual_machine_dict = {"name": "TestVM2", "displayname": "Test VM 2"} + + virtual_machine_2 = VirtualMachine.create( + self.apiClient, + virtual_machine_dict, + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine_2.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_2_root_volume = list_volumes_response[0] + vm_2_root_volume_name = vm_2_root_volume.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg) + + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + + self._check_list(sf_snapshots_2, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + sf_volume_id_2 = sf_volume_2['volumeID'] + sf_volume_size_2 = sf_volume_2['totalSize'] + + vol_snap_a = self._create_and_test_snapshot_2(vm_2_root_volume.id, sf_volume_id_2, sf_volume_id + 5, primary_storage_db_id, sf_volume_size_2, + sf_account_id, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg) + + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + + volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a.id, services, account=self.account.name, domainid=self.domain.id) + + volume_created_from_snapshot_name = volume_created_from_snapshot.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + + self._check_list(sf_volume_3['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + + volume_created_from_snapshot = virtual_machine.attach_volume( + self.apiClient, + volume_created_from_snapshot + ) + + self._delete_and_test_snapshot_2(vol_snap_a, sf_account_id, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg) + + virtual_machine.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 5, 
TestSnapshots._should_be_five_volumes_in_list_err_msg) + + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + + volume_created_from_snapshot = virtual_machine_2.attach_volume( + self.apiClient, + volume_created_from_snapshot + ) + + self._delete_and_test_snapshot_2(vol_snap_2, sf_account_id, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_3, sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + virtual_machine_2.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + list_volumes_response = list_volumes( + self.apiClient, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + data_volume = list_volumes_response[0] + + data_volume = Volume(data_volume.__dict__) + + data_volume.delete(self.apiClient) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + vm_1_root_volume_name = vm_1_root_volume.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + time.sleep(60) + + virtual_machine.stop(self.apiClient, True) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + + sf_volume_id = sf_volume['volumeID'] + sf_volume_size = sf_volume['totalSize'] + + vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, + sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + + volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_1.id, services, account=self.account.name, domainid=self.domain.id) + + volume_created_from_snapshot_name = volume_created_from_snapshot.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 3, 
TestSnapshots._should_be_three_volumes_in_list_err_msg) + + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) + + self._check_list(sf_volume_2['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + + volume_created_from_snapshot = virtual_machine.attach_volume( + self.apiClient, + volume_created_from_snapshot + ) + + sf_volume_id_2 = sf_volume_2['volumeID'] + sf_volume_size_2 = sf_volume_2['totalSize'] + + vol_snap_a = self._create_and_test_snapshot_2(volume_created_from_snapshot.id, sf_volume_id_2, sf_volume_id + 3, primary_storage_db_id, sf_volume_size_2, + sf_account_id, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_a, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + virtual_machine.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + list_volumes_response = list_volumes( + self.apiClient, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vol_snap_a = self._create_and_test_snapshot_2(volume_created_from_snapshot.id, sf_volume_id_2, sf_volume_id + 4, primary_storage_db_id, sf_volume_size_2, + sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + data_volume = list_volumes_response[0] + + data_volume = Volume(data_volume.__dict__) + + data_volume.delete(self.apiClient) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + list_volumes_response = list_volumes( + self.apiClient, + listall=True + ) + + self.assertEqual( + list_volumes_response, + None, + "'list_volumes_response' should be equal to 'None'." 
+ ) + + self._delete_and_test_snapshot_2(vol_snap_a, sf_account_id, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg) + + @attr(hypervisor='XenServer') + def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): + self._set_supports_resign(False) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + vm_1_root_volume_name = vm_1_root_volume.name + + sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + + self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + sf_volume_id = sf_volume['volumeID'] + sf_volume_size = sf_volume['totalSize'] + + vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, + sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 2, primary_storage_db_id, sf_volume_size, + sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + self._set_supports_resign(True) + + vol_snap_a = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_b = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"} + + template_1 = Template.create_from_snapshot(self.apiClient, vol_snap_1, services) + + self.cleanup.append(template_1) + + virtual_machine_dict = {"name": "TestVM2", "displayname": "Test VM 2"} + + virtual_machine_2 = VirtualMachine.create( + self.apiClient, + virtual_machine_dict, + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template_1.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine_2.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_2_root_volume = list_volumes_response[0] + vm_2_root_volume_name = vm_2_root_volume.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 4, 
TestSnapshots._should_be_four_volumes_in_list_err_msg) + + sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + + self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + + volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg) + + volume_created_from_snapshot_1 = virtual_machine_2.attach_volume( + self.apiClient, + volume_created_from_snapshot_1 + ) + + services = {"displaytext": "Template-A", "name": "Template-A-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"} + + template_a = Template.create_from_snapshot(self.apiClient, vol_snap_a, services) + + self.cleanup.append(template_a) + + virtual_machine_dict = {"name": "TestVM3", "displayname": "Test VM 3"} + + virtual_machine_3 = VirtualMachine.create( + self.apiClient, + virtual_machine_dict, + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template_a.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine_3.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_3_root_volume = list_volumes_response[0] + vm_3_root_volume_name = vm_3_root_volume.name + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg) + + sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, vm_3_root_volume_name) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_3['volumeID']) + + self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + services = {"diskname": "Vol-A", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + + volume_created_from_snapshot_a = Volume.create_from_snapshot(self.apiClient, vol_snap_b.id, services, account=self.account.name, domainid=self.domain.id) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg) + + volume_created_from_snapshot_a = virtual_machine_3.attach_volume( + self.apiClient, + volume_created_from_snapshot_a + ) + + virtual_machine.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + # should still be 7 volumes because the SolidFire volume for the root disk of the VM just destroyed + # is still needed for the SolidFire snapshots + self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg) + + virtual_machine_2.delete(self.apiClient, True) + + # Get 
volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg) + + virtual_machine_3.delete(self.apiClient, True) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg) + + data_volume = Volume(volume_created_from_snapshot_a.__dict__) + + data_volume.delete(self.apiClient) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg) + + data_volume = Volume(volume_created_from_snapshot_1.__dict__) + + data_volume.delete(self.apiClient) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + self._delete_and_test_snapshot(vol_snap_b) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + # should still be 2 volumes because the SolidFire volume for the root disk of the VM just destroyed + # is still needed for the SolidFire snapshots + self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) + + self._delete_and_test_snapshot(vol_snap_a) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + self._delete_and_test_snapshot_2(vol_snap_2, sf_account_id, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg) + + def _set_supports_resign(self, supports_resign): + supports_resign = str(supports_resign) + + sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + self.dbConnection.execute(sql_query) + + def _check_list(self, in_list, expected_size_of_list, err_msg): + self.assertEqual( + isinstance(in_list, list), + True, + "'in_list' is not a list." + ) + + self.assertEqual( + len(in_list), + expected_size_of_list, + err_msg + ) + + def _check_list_not_empty(self, in_list): + self.assertEqual( + isinstance(in_list, list), + True, + "'in_list' is not a list." + ) + + self.assertGreater( + len(in_list), + 0, + "The size of 'in_list' must be greater than zero." 
+ ) + + # used when SolidFire snapshots are being used for CloudStack volume snapshots + def _check_snapshot_details(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, sf_snapshot_id, storage_pool_id, sf_volume_size): + self._check_list(sf_snapshot_details, 5, TestSnapshots._should_be_five_items_in_list_err_msg) + + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "takeSnapshot", "true") + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id) + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "snapshotId", sf_snapshot_id) + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfStoragePoolId", storage_pool_id) + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfVolumeSize", sf_volume_size) + + # used when SolidFire volumes are being used for CloudStack volume snapshots + def _check_snapshot_details_2(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, storage_pool_id, sf_volume_size): + self._check_list(sf_snapshot_details, 5, TestSnapshots._should_be_five_items_in_list_err_msg) + + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id) + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfStoragePoolId", storage_pool_id) + self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfVolumeSize", sf_volume_size) + self._check_snapshot_detail_starts_with(sf_snapshot_details, cs_snapshot_id, "iqn", "/iqn.") + self._check_snapshot_detail_size(sf_snapshot_details, cs_snapshot_id, "path", 36) + + def _check_snapshot_detail(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, snapshot_detail_value): + for sf_snapshot_detail_dict in sf_snapshot_details_list: + if sf_snapshot_detail_dict["volumeSnapshotId"] != cs_snapshot_id: + raise Exception("This snapshot detail does not apply to the expected CloudStack volume snapshot.") + + if sf_snapshot_detail_dict["snapshotDetailsName"] == snapshot_detail_key: + if sf_snapshot_detail_dict["snapshotDetailsValue"] == str(snapshot_detail_value): + return + + raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and value '" + str(snapshot_detail_value) + "'.") + + def _check_snapshot_detail_starts_with(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, starts_with): + for sf_snapshot_detail_dict in sf_snapshot_details_list: + if sf_snapshot_detail_dict["volumeSnapshotId"] != cs_snapshot_id: + raise Exception("This snapshot detail does not apply to the expected CloudStack volume snapshot.") + + if sf_snapshot_detail_dict["snapshotDetailsName"] == snapshot_detail_key: + if sf_snapshot_detail_dict["snapshotDetailsValue"].startswith(starts_with): + return + + raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'starts with' value '" + starts_with + "'.") + + def _check_snapshot_detail_size(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, length): + for sf_snapshot_detail_dict in sf_snapshot_details_list: + if sf_snapshot_detail_dict["volumeSnapshotId"] != cs_snapshot_id: + raise Exception("This snapshot detail does not apply to the expected CloudStack volume snapshot.") + + if sf_snapshot_detail_dict["snapshotDetailsName"] == snapshot_detail_key: + if len(sf_snapshot_detail_dict["snapshotDetailsValue"]) == length: + return + + raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'length' value '" + 
str(length) + "'.")
+
+    def _most_recent_sf_snapshot(self, sf_snapshots):
+        self._check_list_not_empty(sf_snapshots)
+
+        most_recent_id = 0
+        sf_snapshot_to_return = None
+
+        for sf_snapshot in sf_snapshots:
+            if sf_snapshot['snapshotID'] > most_recent_id:
+                sf_snapshot_to_return = sf_snapshot
+
+                most_recent_id = sf_snapshot['snapshotID']
+
+        if sf_snapshot_to_return is None:
+            raise Exception("Unable to find the most recent SolidFire snapshot in the provided list")
+
+        return sf_snapshot_to_return
+
+    def _get_cs_volume_snapshot_db_id(self, vol_snap):
+        return self._get_db_id("snapshots", vol_snap)
+
+    def _get_cs_storage_pool_db_id(self, storage_pool):
+        return self._get_db_id("storage_pool", storage_pool)
+
+    def _get_db_id(self, table, db_obj):
+        sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'"
+
+        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
+        sql_result = self.dbConnection.execute(sql_query)
+
+        return sql_result[0][0]
+
+    def _get_sf_volume_by_name(self, sf_volumes, sf_volume_name):
+        self._check_list_not_empty(sf_volumes)
+
+        sf_volume = None
+
+        for volume in sf_volumes:
+            if volume['name'] == sf_volume_name:
+                sf_volume = volume
+
+                break
+
+        self.assertNotEqual(
+            sf_volume,
+            None,
+            "The SolidFire volume could not be found in the expected account."
+        )
+
+        return sf_volume
+
+    def _get_sf_volume_by_id(self, sf_volumes, sf_volume_id):
+        self._check_list_not_empty(sf_volumes)
+
+        sf_volume = None
+
+        for volume in sf_volumes:
+            if volume['volumeID'] == sf_volume_id:
+                sf_volume = volume
+
+                break
+
+        self.assertNotEqual(
+            sf_volume,
+            None,
+            "The SolidFire volume could not be found in the expected account."
+        )
+
+        return sf_volume
+
+    def _get_sf_account_id(self, cs_account_id, primary_storage_id):
+        sf_account_id_request = {'accountid': cs_account_id, 'storageid': primary_storage_id}
+        sf_account_id_result = self.cs_api.getSolidFireAccountId(sf_account_id_request)
+        sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
+
+        self.assertEqual(
+            isinstance(sf_account_id, int),
+            True,
+            TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg
+        )
+
+        return sf_account_id
+
+    def _get_snapshot_detail(self, sf_snapshot_details_list, key):
+        for sf_snapshot_detail_dict in sf_snapshot_details_list:
+            if sf_snapshot_detail_dict["snapshotDetailsName"] == key:
+                return sf_snapshot_detail_dict["snapshotDetailsValue"]
+
+        raise Exception("Unable to find the following snapshot details key: " + key)
+
+    def _check_sf_snapshot_does_not_exist(self, sf_snapshots, sf_snapshot_id):
+        for sf_snapshot in sf_snapshots:
+            # compare as strings: sf_snapshot_id comes from the CloudStack snapshot details, where it is stored as a string
+            if str(sf_snapshot["snapshotID"]) == str(sf_snapshot_id):
+                raise Exception("The following SolidFire snapshot ID should not exist: " + str(sf_snapshot_id))
+
+    def _check_snapshot_details_do_not_exist(self, vol_snap_db_id):
+        sql_query = "Select count(*) From snapshot_details Where snapshot_id = " + str(vol_snap_db_id)
+
+        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
+        sql_result = self.dbConnection.execute(sql_query)
+
+        self.assertEqual(
+            sql_result[0][0],
+            0,
+            "Snapshot details should not exist for the following CloudStack volume snapshot DB ID: " + str(vol_snap_db_id)
+        )
+
+    # used when SolidFire snapshots are being used for CloudStack volume snapshots
+    def _create_and_test_snapshot(self, volume_id_for_snapshot, sf_volume, primary_storage_db_id,
expected_num_snapshots, snapshot_err_msg): + vol_snap = Snapshot.create( + self.apiClient, + volume_id=volume_id_for_snapshot + ) + + sf_volume_id = sf_volume['volumeID'] + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id) + + self._check_list(sf_snapshots, expected_num_snapshots, snapshot_err_msg) + + sf_snapshot = self._most_recent_sf_snapshot(sf_snapshots) + + sf_snapshot_details_request = {'snapshotid': vol_snap.id} + sf_snapshot_details_response = self.cs_api.getVolumeSnapshotDetails(sf_snapshot_details_request) + sf_snapshot_details = sf_snapshot_details_response['null'] # 'null' gets me the list that's in the dictionary + + vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) + + self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, sf_volume_id, sf_snapshot['snapshotID'], primary_storage_db_id, sf_volume['totalSize']) + + return vol_snap + + # used when SolidFire volumes are being used for CloudStack volume snapshots + def _create_and_test_snapshot_2(self, volume_id_for_snapshot, sf_volume_id, sf_volume_id_for_volume_snapshot, primary_storage_db_id, sf_volume_size, + sf_account_id, expected_num_volumes, volume_err_msg): + vol_snap = Snapshot.create( + self.apiClient, + volume_id=volume_id_for_snapshot + ) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id) + + self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + sf_snapshot_details_request = {'snapshotid': vol_snap.id} + sf_snapshot_details_response = self.cs_api.getVolumeSnapshotDetails(sf_snapshot_details_request) + sf_snapshot_details = sf_snapshot_details_response['null'] # 'null' gets me the list that's in the dictionary + + vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) + + self._check_snapshot_details_2(sf_snapshot_details, vol_snap_db_id, sf_volume_id_for_volume_snapshot, primary_storage_db_id, sf_volume_size) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, expected_num_volumes, volume_err_msg) + + sf_volume_for_snapshot = self._get_sf_volume_by_id(sf_volumes, sf_volume_id_for_volume_snapshot) + + self._check_list(sf_volume_for_snapshot['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + + return vol_snap + + # used when SolidFire snapshots are being used for CloudStack volume snapshots + def _delete_and_test_snapshot(self, vol_snap): + vol_snap_id = vol_snap.id + vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) + + sf_snapshot_details_request = {'snapshotid': vol_snap_id} + sf_snapshot_details_response = self.cs_api.getVolumeSnapshotDetails(sf_snapshot_details_request) + sf_snapshot_details = sf_snapshot_details_response['null'] # 'null' gets me the list that's in the dictionary + + sf_volume_id = self._get_snapshot_detail(sf_snapshot_details, "volumeId") + sf_snapshot_id = self._get_snapshot_detail(sf_snapshot_details, "snapshotId") + + vol_snap.delete(self.apiClient) + + # Get snapshot information for volume from SolidFire cluster + sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id) + + self._check_sf_snapshot_does_not_exist(sf_snapshots, sf_snapshot_id) + + self._check_snapshot_details_do_not_exist(vol_snap_db_id) + + # used when SolidFire volumes are being used for CloudStack volume 
snapshots + def _delete_and_test_snapshot_2(self, vol_snap, sf_account_id, expected_num_volumes, volume_err_msg): + vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) + + vol_snap.delete(self.apiClient) + + self._check_snapshot_details_do_not_exist(vol_snap_db_id) + + # Get volume information from SolidFire cluster + sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id) + + self._check_list(sf_volumes, expected_num_volumes, volume_err_msg) + + @classmethod + def _purge_solidfire_volumes(cls): + deleted_volumes = cls.sf_client.list_deleted_volumes() + + for deleted_volume in deleted_volumes: + cls.sf_client.purge_deleted_volume(deleted_volume['volumeID']) + diff --git a/test/integration/plugins/solidfire/TestVMSnapshots.py b/test/integration/plugins/solidfire/TestVMSnapshots.py new file mode 100644 index 00000000000..8fba8f86418 --- /dev/null +++ b/test/integration/plugins/solidfire/TestVMSnapshots.py @@ -0,0 +1,862 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +import random +import SignedAPICall +import XenAPI + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +# Import Integration Libraries + +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, VmSnapshot, Volume + +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_hosts, list_volumes + +# utils - utility classes for common cleanup, external library wrappers, etc. 
+from marvin.lib.utils import cleanup_resources + +from solidfire import solidfire_element_api as sf_api + +# on April 15, 2016: Ran 2 tests in 800.299s with three hosts +# on May 2, 2016: Ran 2 tests in 789.729s with two hosts + + +class TestData: + account = "account" + capacityBytes = "capacitybytes" + capacityIops = "capacityiops" + clusterId = "clusterId" + computeOffering = "computeoffering" + diskOffering = "diskoffering" + domainId = "domainId" + hypervisor = "hypervisor" + login = "login" + mvip = "mvip" + password = "password" + port = "port" + primaryStorage = "primarystorage" + provider = "provider" + scope = "scope" + solidFire = "solidfire" + storageTag = "SolidFire_SAN_1" + tags = "tags" + templateName = "templatename" + url = "url" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + volume_1 = "volume_1" + xenServer = "xenserver" + zoneId = "zoneId" + + def __init__(self): + self.testdata = { + TestData.solidFire: { + TestData.mvip: "192.168.139.112", + TestData.login: "admin", + TestData.password: "admin", + TestData.port: 443, + TestData.url: "https://192.168.139.112:443" + }, + TestData.xenServer: { + TestData.username: "root", + TestData.password: "solidfire" + }, + TestData.account: { + "email": "test@test.com", + "firstname": "John", + "lastname": "Doe", + TestData.username: "test", + TestData.password: "test" + }, + TestData.user: { + "email": "user@test.com", + "firstname": "Jane", + "lastname": "Doe", + TestData.username: "testuser", + TestData.password: "password" + }, + TestData.primaryStorage: { + "name": "SolidFire-%d" % random.randint(0, 100), + TestData.scope: "ZONE", + "url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", + TestData.provider: "SolidFire", + TestData.tags: TestData.storageTag, + TestData.capacityIops: 4500000, + TestData.capacityBytes: 2251799813685248, + TestData.hypervisor: "Any" + }, + TestData.virtualMachine: { + "name": "TestVM", + "displayname": "Test VM" + }, + TestData.computeOffering: { + "name": "SF_CO_1", + "displaytext": "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": False, + "miniops": "10000", + "maxiops": "15000", + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag + }, + TestData.diskOffering: { + "name": "SF_DO_1", + "displaytext": "SF_DO_1 (Min IOPS = 300; Max IOPS = 500)", + "disksize": 128, + "customizediops": False, + "miniops": 300, + "maxiops": 500, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "testdiskofferings": { + "customiopsdo": { + "name": "SF_Custom_IOPS_DO", + "displaytext": "Customized IOPS DO (Size = 128 GB; Min IOPS = 500; Max IOPS = 1000)", + "disksize": 128, + "customizediops": True, + "miniops": 500, + "maxiops": 1000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "customsizedo": { + "name": "SF_Custom_Size_DO", + "displaytext": "Customized IOPS DO (Min IOPS = 500; Max IOPS = 1000)", + "disksize": 175, + "customizediops": False, + "miniops": 500, + "maxiops": 1000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "customsizeandiopsdo": { + "name": "SF_Custom_Size_IOPS_DO", + "displaytext": "Customized Size and 
IOPS DO", + "disksize": 200, + "customizediops": True, + "miniops": 400, + "maxiops": 800, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newiopsdo": { + "name": "SF_New_IOPS_DO", + "displaytext": "New IOPS (Size = 128 GB; Min IOPS = 350, Max IOPS = 700)", + "disksize": 128, + "miniops": 350, + "maxiops": 700, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newsizedo": { + "name": "SF_New_Size_DO", + "displaytext": "New Size: 175", + "disksize": 175, + "miniops": 400, + "maxiops": 800, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newsizeandiopsdo": { + "name": "SF_New_Size_IOPS_DO", + "displaytext": "New Size and IOPS", + "disksize": 200, + "miniops": 200, + "maxiops": 400, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + }, + TestData.volume_1: { + "diskname": "testvolume", + }, + "volume2": { + "diskname": "testvolume2", + }, + TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)", + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "192.168.129.50" + } + + +class TestVMSnapshots(cloudstackTestCase): + _should_be_no_vm_snapshots_err_msg = "There should be no VM snapshots." + _should_only_be_one_vm_snapshot_err_msg = "There should only be one VM snapshot." + _should_only_be_one_root_volume_err_msg = "There should only be one root volume." + _path_should_have_changed_err_msg = "The 'path' in the 'DB' should have changed." + _path_should_not_have_changed_err_msg = "The 'path' in the 'DB' should not have changed." + _should_only_be_one_vdi_err_msg = "There should only be one VDI." + _should_be_three_vdis_err_msg = "There should be three VDIs." + _active_vdis_should_not_be_the_same_err_msg = "The active VDIs should not be the same." + _active_vdis_should_be_the_same_err_msg = "The active VDIs should be the same." + _snapshot_vdis_should_be_the_same_err_msg = "The snapshot VDIs should be the same." + _base_vdis_should_be_the_same_err_msg = "The base VDIs should be the same." + _snapshot_parent_not_correct_err_msg = "Snapshot's parent is not correct." 
+ + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestVMSnapshots, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + + cls.testdata = TestData().testdata + + # Set up XenAPI connection + host_ip = "https://" + \ + list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress + + cls.xen_session = XenAPI.Session(host_ip) + + xenserver = cls.testdata[TestData.xenServer] + + cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) + + # Set up SolidFire connection + cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + + # Get Resources from Cloud Infrastructure + cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) + template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata[TestData.account], + admin=1 + ) + + # Set up connection to make customized API calls + user = User.create( + cls.apiClient, + cls.testdata[TestData.user], + account=cls.account.name, + domainid=cls.domain.id + ) + + url = cls.testdata[TestData.url] + + api_url = "http://" + url + ":8080/client/api" + userkeys = User.registerUserKeys(cls.apiClient, user.id) + + cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey) + + primarystorage = cls.testdata[TestData.primaryStorage] + + cls.primary_storage = StoragePool.create( + cls.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=cls.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + capacityiops=primarystorage[TestData.capacityIops], + capacitybytes=primarystorage[TestData.capacityBytes], + hypervisor=primarystorage[TestData.hypervisor] + ) + + compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + cls.disk_offering = DiskOffering.create( + cls.apiClient, + cls.testdata[TestData.diskOffering] + ) + + # Create VM and volume for tests + cls.virtual_machine = VirtualMachine.create( + cls.apiClient, + cls.testdata[TestData.virtualMachine], + accountid=cls.account.name, + zoneid=cls.zone.id, + serviceofferingid=compute_offering.id, + templateid=template.id, + domainid=cls.domain.id, + startvm=True + ) + + cls._cleanup = [ + cls.virtual_machine, + compute_offering, + cls.disk_offering, + user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiClient, cls._cleanup) + + cls.primary_storage.delete(cls.apiClient) + + cls._purge_solidfire_volumes() + except Exception as e: + logging.debug("Exception in tearDownClass(cls): %s" % e) + + def setUp(self): + self.cleanup = [] + + def tearDown(self): + try: + cleanup_resources(self.apiClient, self.cleanup) + except Exception as e: + logging.debug("Exception in tearDown(self): %s" % e) + + def test_01_take_VM_snapshot(self): + self.virtual_machine.start(self.apiClient) + + root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true") + + self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg) + + root_volume = root_volumes[0] + + volume_id = {'volumeid': root_volume.id} + + sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(volume_id) + sf_iscsi_name = 
sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName'] + + self._check_iscsi_name(sf_iscsi_name) + + root_volume_path_1 = self._get_path(volume_id) + + ####################################### + ####################################### + # STEP 1: Take snapshot of running VM # + ####################################### + ####################################### + vm_snapshot = VmSnapshot.create( + self.apiClient, + vmid=self.virtual_machine.id, + snapshotmemory="false", + name="Test Snapshot", + description="Test Snapshot Desc" + ) + + list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") + + self._verify_vm_snapshot(list_vm_snapshots, vm_snapshot) + + root_volume_path_2 = self._get_path(volume_id) + + self.assertEqual( + root_volume_path_1, + root_volume_path_2, + TestVMSnapshots._path_should_not_have_changed_err_msg + ) + + xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)[0] + + xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr) + + self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) + + vdis_after_create = self._get_vdis(xen_vdis) + + vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(vdis_after_create.snapshot_vdi["snapshot_of"]) + + self.assertEqual( + vdiSnapshotOf["uuid"], + vdis_after_create.active_vdi["uuid"], + TestVMSnapshots._snapshot_parent_not_correct_err_msg + ) + + ####################################### + ####################################### + ### STEP 2: Revert VM to Snapshot ### + ####################################### + ####################################### + self.virtual_machine.stop(self.apiClient) + + VmSnapshot.revertToSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id) + + list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") + + self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg) + + root_volume_path_3 = self._get_path(volume_id) + + self.assertNotEqual( + root_volume_path_1, + root_volume_path_3, + TestVMSnapshots._path_should_have_changed_err_msg + ) + + xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr) + + self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) + + vdis_after_revert = self._get_vdis(xen_vdis) + + self.assertNotEqual( + vdis_after_create.active_vdi["uuid"], + vdis_after_revert.active_vdi["uuid"], + TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg + ) + + self.assertEqual( + vdis_after_create.snapshot_vdi["uuid"], + vdis_after_revert.snapshot_vdi["uuid"], + TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg + ) + + self.assertEqual( + vdis_after_create.base_vdi["uuid"], + vdis_after_revert.base_vdi["uuid"], + TestVMSnapshots._base_vdis_should_be_the_same_err_msg + ) + + ####################################### + ####################################### + ##### STEP 3: Delete VM snapshot ##### + ####################################### + ####################################### + VmSnapshot.deleteVMSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id) + + list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") + + self.assertEqual( + list_vm_snapshots, + None, + TestVMSnapshots._should_be_no_vm_snapshots_err_msg + ) + + root_volume_path_4 = self._get_path(volume_id) + + self.assertEqual( + root_volume_path_3, + root_volume_path_4, + TestVMSnapshots._path_should_not_have_changed_err_msg + ) + + xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr) + + self._check_list(xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg) + + vdis_after_delete = 
self._get_vdis(xen_vdis, True) + + self.assertEqual( + vdis_after_revert.active_vdi["uuid"], + vdis_after_delete.active_vdi["uuid"], + TestVMSnapshots._active_vdis_should_be_the_same_err_msg + ) + + ####################################### + ####################################### + ##### STEP 4: Start VM ##### + ####################################### + ####################################### + self.virtual_machine.start(self.apiClient) + + def test_02_take_VM_snapshot_with_data_disk(self): + self.virtual_machine.start(self.apiClient) + + data_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_1], + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering.id + ) + + self.cleanup = [data_volume] + + self.virtual_machine.attach_volume(self.apiClient, data_volume) + + root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true") + + self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg) + + root_volume = root_volumes[0] + + root_volume_id = {'volumeid': root_volume.id} + + sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(root_volume_id) + sf_iscsi_root_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName'] + + self._check_iscsi_name(sf_iscsi_root_volume_name) + + root_volume_path_1 = self._get_path(root_volume_id) + + data_volumes = list_volumes(self.apiClient, type="DATADISK", listAll="true") + + self._check_list(data_volumes, 1, "There should only be one data volume.") + + data_volume = data_volumes[0] + + data_volume_id = {'volumeid': data_volume.id} + + sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(data_volume_id) + sf_iscsi_data_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName'] + + self._check_iscsi_name(sf_iscsi_data_volume_name) + + data_volume_path_1 = self._get_path(data_volume_id) + + ####################################### + ####################################### + # STEP 1: Take snapshot of running VM # + ####################################### + ####################################### + vm_snapshot = VmSnapshot.create( + self.apiClient, + vmid=self.virtual_machine.id, + snapshotmemory="false", + name="Test Snapshot", + description="Test Snapshot Desc" + ) + + list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") + + self._verify_vm_snapshot(list_vm_snapshots, vm_snapshot) + + root_volume_path_2 = self._get_path(root_volume_id) + + self.assertEqual( + root_volume_path_1, + root_volume_path_2, + TestVMSnapshots._path_should_not_have_changed_err_msg + ) + + data_volume_path_2 = self._get_path(data_volume_id) + + self.assertEqual( + data_volume_path_1, + data_volume_path_2, + TestVMSnapshots._path_should_not_have_changed_err_msg + ) + + root_volume_xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_root_volume_name)[0] + + root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr) + + self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) + + root_volume_vdis_after_create = self._get_vdis(root_volume_xen_vdis) + + vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(root_volume_vdis_after_create.snapshot_vdi["snapshot_of"]) + + self.assertEqual( + vdiSnapshotOf["uuid"], + root_volume_vdis_after_create.active_vdi["uuid"], + TestVMSnapshots._snapshot_parent_not_correct_err_msg + ) + + data_volume_xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_data_volume_name)[0] + + data_volume_xen_vdis = 
self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr) + + self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) + + data_volume_vdis_after_create = self._get_vdis(data_volume_xen_vdis) + + vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(data_volume_vdis_after_create.snapshot_vdi["snapshot_of"]) + + self.assertEqual( + vdiSnapshotOf["uuid"], + data_volume_vdis_after_create.active_vdi["uuid"], + TestVMSnapshots._snapshot_parent_not_correct_err_msg + ) + + ####################################### + ####################################### + ### STEP 2: Revert VM to Snapshot ### + ####################################### + ####################################### + self.virtual_machine.stop(self.apiClient) + + VmSnapshot.revertToSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id) + + list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") + + self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg) + + root_volume_path_3 = self._get_path(root_volume_id) + + self.assertNotEqual( + root_volume_path_1, + root_volume_path_3, + TestVMSnapshots._path_should_have_changed_err_msg + ) + + root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr) + + self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) + + root_volume_vdis_after_revert = self._get_vdis(root_volume_xen_vdis) + + self.assertNotEqual( + root_volume_vdis_after_create.active_vdi["uuid"], + root_volume_vdis_after_revert.active_vdi["uuid"], + TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg + ) + + self.assertEqual( + root_volume_vdis_after_create.snapshot_vdi["uuid"], + root_volume_vdis_after_revert.snapshot_vdi["uuid"], + TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg + ) + + self.assertEqual( + root_volume_vdis_after_create.base_vdi["uuid"], + root_volume_vdis_after_revert.base_vdi["uuid"], + TestVMSnapshots._base_vdis_should_be_the_same_err_msg + ) + + data_volume_path_3 = self._get_path(data_volume_id) + + self.assertNotEqual( + data_volume_path_1, + data_volume_path_3, + TestVMSnapshots._path_should_have_changed_err_msg + ) + + data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr) + + self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) + + data_volume_vdis_after_revert = self._get_vdis(data_volume_xen_vdis) + + self.assertNotEqual( + data_volume_vdis_after_create.active_vdi["uuid"], + data_volume_vdis_after_revert.active_vdi["uuid"], + TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg + ) + + self.assertEqual( + data_volume_vdis_after_create.snapshot_vdi["uuid"], + data_volume_vdis_after_revert.snapshot_vdi["uuid"], + TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg + ) + + self.assertEqual( + data_volume_vdis_after_create.base_vdi["uuid"], + data_volume_vdis_after_revert.base_vdi["uuid"], + TestVMSnapshots._base_vdis_should_be_the_same_err_msg + ) + + ####################################### + ####################################### + ##### STEP 3: Delete VM snapshot ##### + ####################################### + ####################################### + VmSnapshot.deleteVMSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id) + + list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") + + self.assertEqual( + list_vm_snapshots, + None, + TestVMSnapshots._should_be_no_vm_snapshots_err_msg + ) + + root_volume_path_4 = self._get_path(root_volume_id) + + self.assertEqual( + 
root_volume_path_3, + root_volume_path_4, + TestVMSnapshots._path_should_not_have_changed_err_msg + ) + + root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr) + + self._check_list(root_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg) + + root_volume_vdis_after_delete = self._get_vdis(root_volume_xen_vdis, True) + + self.assertEqual( + root_volume_vdis_after_revert.active_vdi["uuid"], + root_volume_vdis_after_delete.active_vdi["uuid"], + TestVMSnapshots._active_vdis_should_be_the_same_err_msg + ) + + data_volume_path_4 = self._get_path(data_volume_id) + + self.assertEqual( + data_volume_path_3, + data_volume_path_4, + TestVMSnapshots._path_should_not_have_changed_err_msg + ) + + data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr) + + self._check_list(data_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg) + + data_volume_vdis_after_delete = self._get_vdis(data_volume_xen_vdis, True) + + self.assertEqual( + data_volume_vdis_after_revert.active_vdi["uuid"], + data_volume_vdis_after_delete.active_vdi["uuid"], + TestVMSnapshots._active_vdis_should_be_the_same_err_msg + ) + + ####################################### + ####################################### + ##### STEP 4: Start VM ##### + ####################################### + ####################################### + self.virtual_machine.detach_volume(self.apiClient, data_volume) + + self.virtual_machine.start(self.apiClient) + + def _get_path(self, volume_id): + path_result = self.cs_api.getPathForVolume(volume_id) + + return path_result['apipathforvolume']['path'] + + def _verify_vm_snapshot(self, list_vm_snapshots, vm_snapshot): + self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg) + + vm_snapshot_from_list = list_vm_snapshots[0] + + self.assertEqual( + vm_snapshot.id, + vm_snapshot_from_list.id, + "There is a problem with the VM snapshot ID." + ) + + self.assertEqual( + vm_snapshot.virtualmachineid, + self.virtual_machine.id, + "The ID of the snapshot's virtual machine does not match the expected virtual machine." + ) + + self.assertEqual( + vm_snapshot.state, + "Ready", + "The snapshot is not in the 'Ready' state." + ) + + def _check_iscsi_name(self, sf_iscsi_name): + self.assertEqual( + sf_iscsi_name[0], + "/", + "The iSCSI name needs to start with a forward slash." + ) + + def _check_list(self, in_list, expected_size_of_list, err_msg): + self.assertEqual( + isinstance(in_list, list), + True, + "'in_list' is not a list." + ) + + self.assertEqual( + len(in_list), + expected_size_of_list, + err_msg + ) + + def _get_vdis(self, xen_vdis, only_active_expected=False): + expected_number_of_vdis = 1 if only_active_expected else 3 + + self.assertEqual( + len(xen_vdis), + expected_number_of_vdis, + "The list had an unexpected number of items in it." + ) + + active_vdi = None + snapshot_vdi = None + base_vdi = None + + for temp_vdi in xen_vdis: + temp = self.xen_session.xenapi.VDI.get_record(temp_vdi) + + if temp["name_label"] == "base copy": + base_vdi = temp + else: + if temp["is_a_snapshot"] == True: + snapshot_vdi = temp + else: + active_vdi = temp + + self.assertNotEqual( + active_vdi, + None, + "The active VDI could not be located." + ) + + if only_active_expected: + self.assertEqual( + snapshot_vdi, + None, + "The snapshot VDI should not be present." + ) + + self.assertEqual( + base_vdi, + None, + "The base VDI should not be present." 
+ ) + else: + self.assertNotEqual( + snapshot_vdi, + None, + "The snapshot VDI could not be located." + ) + + self.assertNotEqual( + base_vdi, + None, + "The base VDI could not be located." + ) + + class VdiCollection(object): + pass + + vdis = VdiCollection() + + vdis.active_vdi = active_vdi + vdis.snapshot_vdi = snapshot_vdi + vdis.base_vdi = base_vdi + + return vdis + + @classmethod + def _purge_solidfire_volumes(cls): + deleted_volumes = cls.sf_client.list_deleted_volumes() + + for deleted_volume in deleted_volumes: + cls.sf_client.purge_deleted_volume(deleted_volume['volumeID']) + diff --git a/test/integration/plugins/solidfire/TestVolumes.py b/test/integration/plugins/solidfire/TestVolumes.py new file mode 100644 index 00000000000..ed7d42ae24c --- /dev/null +++ b/test/integration/plugins/solidfire/TestVolumes.py @@ -0,0 +1,1676 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +import random +import SignedAPICall +import XenAPI + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +from nose.plugins.attrib import attr + +# Import Integration Libraries + +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume + +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, \ + list_volumes + +# utils - utility classes for common cleanup, external library wrappers, etc. 
+from marvin.lib.utils import cleanup_resources + +from solidfire import solidfire_element_api as sf_api + +# on April 14, 2016: Ran 11 tests in 2494.043s with three hosts (resign = True) +# on April 14, 2016: Ran 11 tests in 2033.516s with three hosts (resign = False) + +# on May 2, 2016: Ran 11 tests in 2352.461s with two hosts (resign = True) +# on May 2, 2016: Ran 11 tests in 1982.066s with two hosts (resign = False) + + +class TestData(): + account = "account" + capacityBytes = "capacitybytes" + capacityIops = "capacityiops" + clusterId = "clusterId" + computeOffering = "computeoffering" + diskName = "diskname" + diskOffering = "diskoffering" + domainId = "domainId" + hypervisor = "hypervisor" + login = "login" + mvip = "mvip" + password = "password" + port = "port" + primaryStorage = "primarystorage" + provider = "provider" + scope = "scope" + solidFire = "solidfire" + storageTag = "SolidFire_SAN_1" + tags = "tags" + templateCacheName = "centos56-x86-64-xen" + templateName = "templatename" + testAccount = "testaccount" + url = "url" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + virtualMachine2 = "virtualmachine2" + volume_1 = "volume_1" + volume_2 = "volume_2" + xenServer = "xenserver" + zoneId = "zoneId" + + def __init__(self): + self.testdata = { + TestData.solidFire: { + TestData.mvip: "192.168.139.112", + TestData.login: "admin", + TestData.password: "admin", + TestData.port: 443, + TestData.url: "https://192.168.139.112:443" + }, + TestData.xenServer: { + TestData.username: "root", + TestData.password: "solidfire" + }, + TestData.account: { + "email": "test@test.com", + "firstname": "John", + "lastname": "Doe", + "username": "test", + "password": "test" + }, + TestData.testAccount: { + "email": "test2@test2.com", + "firstname": "Jane", + "lastname": "Doe", + "username": "test2", + "password": "test" + }, + TestData.user: { + "email": "user@test.com", + "firstname": "Jane", + "lastname": "Doe", + "username": "testuser", + "password": "password" + }, + TestData.primaryStorage: { + "name": "SolidFire-%d" % random.randint(0, 100), + TestData.scope: "ZONE", + "url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", + TestData.provider: "SolidFire", + TestData.tags: TestData.storageTag, + TestData.capacityIops: 4500000, + TestData.capacityBytes: 2251799813685248, + TestData.hypervisor: "Any" + }, + TestData.virtualMachine: { + "name": "TestVM", + "displayname": "Test VM" + }, + TestData.virtualMachine2: { + "name": "TestVM2", + "displayname": "Test VM 2" + }, + TestData.computeOffering: { + "name": "SF_CO_1", + "displaytext": "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": False, + "miniops": "10000", + "maxiops": "15000", + "hypervisorsnapshotreserve": 200, + "tags": "SolidFire_SAN_1" + }, + TestData.diskOffering: { + "name": "SF_DO_1", + "displaytext": "SF_DO_1 (Min IOPS = 300; Max IOPS = 500)", + "disksize": 128, + "customizediops": False, + "miniops": 300, + "maxiops": 500, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "testdiskofferings": { + "customiopsdo": { + "name": "SF_Custom_Iops_DO", + "displaytext": "Customized Iops DO", + "disksize": 128, + "customizediops": True, + "miniops": 500, + "maxiops": 1000, + 
"hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "customsizedo": { + "name": "SF_Custom_Size_DO", + "displaytext": "Customized Size DO", + "disksize": 175, + "customizediops": False, + "miniops": 500, + "maxiops": 1000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "customsizeandiopsdo": { + "name": "SF_Custom_Iops_Size_DO", + "displaytext": "Customized Size and Iops DO", + "disksize": 200, + "customizediops": True, + "miniops": 400, + "maxiops": 800, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newiopsdo": { + "name": "SF_New_Iops_DO", + "displaytext": "New Iops (min=350, max = 700)", + "disksize": 128, + "miniops": 350, + "maxiops": 700, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newsizedo": { + "name": "SF_New_Size_DO", + "displaytext": "New Size: 175", + "disksize": 175, + "miniops": 400, + "maxiops": 800, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newsizeandiopsdo": { + "name": "SF_New_Size_Iops_DO", + "displaytext": "New Size and Iops", + "disksize": 200, + "miniops": 200, + "maxiops": 400, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + }, + TestData.volume_1: { + TestData.diskName: "test-volume", + }, + TestData.volume_2: { + TestData.diskName: "test-volume-2", + }, + TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)", + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "192.168.129.50" + } + + +class TestVolumes(cloudstackTestCase): + _should_only_be_one_vm_in_list_err_msg = "There should only be one VM in this list." + _should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list." + _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer." + _vag_id_should_be_non_zero_int_err_msg = "The SolidFire VAG ID should be a non-zero integer." + _volume_size_should_be_non_zero_int_err_msg = "The SolidFire volume size should be a non-zero integer." + _volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match." + _vm_not_in_running_state_err_msg = "The VM is not in the 'Running' state." + _vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state." + _sr_not_shared_err_msg = "The SR is not shared." + _volume_response_should_not_be_zero_err_msg = "The length of the response for the SolidFire-volume query should not be zero." + _list_should_be_empty = "The list should be empty." + _volume_should_not_be_in_a_vag = "The volume should not be in a volume access group." 
+ + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestVolumes, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.dbConnection = testclient.getDbConnection() + + cls.testdata = TestData().testdata + + cls.supports_resign = True + + cls._set_supports_resign() + + # Look up the IP address of the XenServer host used for the XenAPI connection + host_ip = "https://" + \ + list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress + + # Set up XenAPI connection + cls.xen_session = XenAPI.Session(host_ip) + + xenserver = cls.testdata[TestData.xenServer] + + cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) + + # Set up SolidFire connection + cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + + # Get Resources from Cloud Infrastructure + cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) + cls.cluster = list_clusters(cls.apiClient)[0] + cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata["account"], + admin=1 + ) + + # Set up connection to make customized API calls + cls.user = User.create( + cls.apiClient, + cls.testdata["user"], + account=cls.account.name, + domainid=cls.domain.id + ) + + url = cls.testdata[TestData.url] + + api_url = "http://" + url + ":8080/client/api" + userkeys = User.registerUserKeys(cls.apiClient, cls.user.id) + + cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey) + + primarystorage = cls.testdata[TestData.primaryStorage] + + cls.primary_storage = StoragePool.create( + cls.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=cls.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + capacityiops=primarystorage[TestData.capacityIops], + capacitybytes=primarystorage[TestData.capacityBytes], + hypervisor=primarystorage[TestData.hypervisor] + ) + + cls.compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + cls.disk_offering = DiskOffering.create( + cls.apiClient, + cls.testdata[TestData.diskOffering] + ) + + # Create VM and volume for tests + cls.virtual_machine = VirtualMachine.create( + cls.apiClient, + cls.testdata[TestData.virtualMachine], + accountid=cls.account.name, + zoneid=cls.zone.id, + serviceofferingid=cls.compute_offering.id, + templateid=cls.template.id, + domainid=cls.domain.id, + startvm=True + ) + + cls.volume = Volume.create( + cls.apiClient, + cls.testdata[TestData.volume_1], + account=cls.account.name, + domainid=cls.domain.id, + zoneid=cls.zone.id, + diskofferingid=cls.disk_offering.id + ) + + # Resources that are to be destroyed + cls._cleanup = [ + cls.volume, + cls.virtual_machine, + cls.compute_offering, + cls.disk_offering, + cls.user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiClient, cls._cleanup) + + cls.primary_storage.delete(cls.apiClient) + + cls._purge_solidfire_volumes() + except Exception as e: + logging.debug("Exception in tearDownClass(cls): %s" % e) + + def setUp(self): + self.attached = False + self.cleanup = [] + + def tearDown(self): + if self.attached: + self.virtual_machine.detach_volume(self.apiClient, self.volume) + + cleanup_resources(self.apiClient,
self.cleanup) + + @attr(hypervisor='XenServer') + def test_00_check_template_cache(self): + if not self.supports_resign: + self.skipTest("Resigning is not supported, so there is no template cache volume to check.") + + sf_volumes = self._get_sf_volumes() + + sf_volume = self._check_and_get_sf_volume(sf_volumes, TestData.templateCacheName) + + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 0, + TestVolumes._volume_should_not_be_in_a_vag + ) + + sf_account_id = sf_volume["accountID"] + + sf_account = self.sf_client.get_account_by_id(sf_account_id)["account"] + + sf_account_name = sf_account["username"] + + self.assertEqual( + sf_account_name.endswith("_1"), + True, + "The template cache volume's account does not end with '_1'." + ) + + @attr(hypervisor='XenServer') + def test_01_attach_new_volume_to_stopped_VM(self): + '''Attach a volume to a stopped virtual machine, then start VM''' + + self.virtual_machine.stop(self.apiClient) + + new_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_2], + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering.id + ) + + self.cleanup.append(new_volume) + + self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + new_volume = self.virtual_machine.attach_volume( + self.apiClient, + new_volume + ) + + vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + self.virtual_machine.start(self.apiClient) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + "running", + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_volume_size = self._get_volume_size_with_hsr(new_volume) + + sf_vag_id = self._get_vag_id() + + sf_iscsi_name = self._get_iqn(new_volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + # Detach volume + new_volume = self.virtual_machine.detach_volume( + self.apiClient, + new_volume + ) + + @attr(hypervisor='XenServer') + def test_02_attach_detach_attach_volume(self): + '''Attach, detach, and attach volume to a running VM''' + + self.virtual_machine.start(self.apiClient) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_vag_id = self._get_vag_id() + + self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + ####################################### + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_iscsi_name = self._get_iqn(self.volume) + + sf_volume_size = 
self._get_volume_size_with_hsr(self.volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + ######################################### + ######################################### + # STEP 2: Detach volume from running VM # + ######################################### + ######################################### + + self.volume = self.virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should not be attached to a VM." + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 0, + "The volume should not be in a VAG." + ) + + self._check_xen_sr(sf_iscsi_name, False) + + ####################################### + ####################################### + # STEP 3: Attach volume to running VM # + ####################################### + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + @attr(hypervisor='XenServer') + def test_03_attached_volume_reboot_VM(self): + '''Attach volume to running VM, then reboot.''' + + self.virtual_machine.start(self.apiClient) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_vag_id = self._get_vag_id() + + self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + ####################################### + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_iscsi_name = self._get_iqn(self.volume) + + sf_volume_size = self._get_volume_size_with_hsr(self.volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, 
sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + ####################################### + ####################################### + # STEP 2: Reboot VM with attached vol # + ####################################### + ####################################### + self.virtual_machine.reboot(self.apiClient) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + sf_iscsi_name = self._get_iqn(self.volume) + + sf_volume_size = self._get_volume_size_with_hsr(self.volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + @attr(hypervisor='XenServer') + def test_04_detach_volume_reboot(self): + '''Detach volume from a running VM, then reboot.''' + + self.virtual_machine.start(self.apiClient) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_vag_id = self._get_vag_id() + + self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + ####################################### + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_iscsi_name = self._get_iqn(self.volume) + + sf_volume_size = self._get_volume_size_with_hsr(self.volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + ######################################### + ######################################### + # STEP 2: Detach volume from running VM # + ######################################### + ######################################### + + self.volume = self.virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should not be attached to a VM." 
+ ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 0, + TestVolumes._volume_should_not_be_in_a_vag + ) + + self._check_xen_sr(sf_iscsi_name, False) + + ####################################### + ####################################### + # STEP 3: Reboot VM with detached vol # + ####################################### + ####################################### + + self.virtual_machine.reboot(self.apiClient) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 0, + TestVolumes._volume_should_not_be_in_a_vag + ) + + self._check_xen_sr(sf_iscsi_name, False) + + @attr(hypervisor='XenServer') + def test_05_detach_vol_stopped_VM_start(self): + '''Detach volume from a stopped VM, then start.''' + + self.virtual_machine.start(self.apiClient) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_vag_id = self._get_vag_id() + + self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + ####################################### + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_iscsi_name = self._get_iqn(self.volume) + + sf_volume_size = self._get_volume_size_with_hsr(self.volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + ######################################### + ######################################### + # STEP 2: Detach volume from stopped VM # + ######################################### + ######################################### + + self.virtual_machine.stop(self.apiClient) + + self.volume = self.virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should not be attached to a VM." 
+ ) + + self.assertEqual( + vm.state.lower(), + 'stopped', + TestVolumes._vm_not_in_stopped_state_err_msg + ) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 0, + TestVolumes._volume_should_not_be_in_a_vag + ) + + self._check_xen_sr(sf_iscsi_name, False) + + ####################################### + ####################################### + # STEP 3: Start VM with detached vol # + ####################################### + ####################################### + + self.virtual_machine.start(self.apiClient) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 0, + TestVolumes._volume_should_not_be_in_a_vag + ) + + self._check_xen_sr(sf_iscsi_name, False) + + @attr(hypervisor='XenServer') + def test_06_attach_volume_to_stopped_VM(self): + '''Attach a volume to a stopped virtual machine, then start VM''' + + self.virtual_machine.stop(self.apiClient) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_vag_id = self._get_vag_id() + + self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + ####################################### + ####################################### + # STEP 1: Attach volume to stopped VM # + ####################################### + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'stopped', + TestVolumes._vm_not_in_stopped_state_err_msg + ) + + sf_iscsi_name = self._get_iqn(self.volume) + + sf_volume_size = self._get_volume_size_with_hsr(self.volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + self.virtual_machine.start(self.apiClient) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_iscsi_name = self._get_iqn(self.volume) + + sf_volume_size = self._get_volume_size_with_hsr(self.volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + @attr(hypervisor='XenServer') + def test_07_destroy_expunge_VM_with_volume(self): + '''Destroy and expunge VM with attached volume''' + + 
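+ # The attached volume should outlive the VM: after the destroy/expunge in step 2,
+ # it should be detached in CloudStack, out of the SolidFire VAG, and its XenServer SR
+ # should be gone, while the SolidFire volume itself (with its size and IOPS settings)
+ # should still exist.
+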
####################################### + ####################################### + # STEP 1: Create VM and attach volume # + ####################################### + ####################################### + + test_virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine2], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + self.volume = test_virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(test_virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_volume_size = self._get_volume_size_with_hsr(self.volume) + + sf_vag_id = self._get_vag_id() + + sf_iscsi_name = self._get_iqn(self.volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + ####################################### + ####################################### + # STEP 2: Destroy and Expunge VM # + ####################################### + ####################################### + + test_virtual_machine.delete(self.apiClient, True) + + self.attached = False + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should no longer be attached to a VM." + ) + + self.assertEqual( + vol.vmname, + None, + "The VM name should be cleared once the VM has been expunged." + ) + + list_virtual_machine_response = list_virtual_machines( + self.apiClient, + id=test_virtual_machine.id + ) + + self.assertEqual( + list_virtual_machine_response, + None, + "The expunged VM should no longer be returned by listVirtualMachines." + ) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 0, + TestVolumes._volume_should_not_be_in_a_vag + ) + + self._check_xen_sr(sf_iscsi_name, False) + + @attr(hypervisor='XenServer') + def test_08_delete_volume_was_attached(self): + '''Delete volume that was attached to a VM and is detached now''' + + self.virtual_machine.start(self.apiClient) + + ####################################### + ####################################### + # STEP 1: Create vol and attach to VM # + ####################################### + ####################################### + + new_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_2], + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering.id + ) + + volume_to_delete_later = new_volume + + self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + new_volume = self.virtual_machine.attach_volume( + self.apiClient, + new_volume + ) + + vol = self._check_and_get_cs_volume(new_volume.id,
self.testdata[TestData.volume_2][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_volume_size = self._get_volume_size_with_hsr(new_volume) + + sf_vag_id = self._get_vag_id() + + sf_iscsi_name = self._get_iqn(new_volume) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self._check_vag(sf_volume, sf_vag_id) + + self._check_xen_sr(sf_iscsi_name) + + ####################################### + ####################################### + # STEP 2: Detach and delete volume # + ####################################### + ####################################### + + new_volume = self.virtual_machine.detach_volume( + self.apiClient, + new_volume + ) + + vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should no longer be attached to a VM." + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 0, + TestVolumes._volume_should_not_be_in_a_vag + ) + + self._check_xen_sr(sf_iscsi_name, False) + + volume_to_delete_later.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=new_volume.id + ) + + self.assertEqual( + list_volumes_response, + None, + "The volume should no longer be returned by listVolumes after deletion." + ) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + self._check_and_get_sf_volume(sf_volumes, vol.name, False) + + @attr(hypervisor='XenServer') + def test_09_attach_volumes_multiple_accounts(self): + '''Attach a data disk to a VM in one account and attach another data disk to a VM in another account''' + + self.virtual_machine.start(self.apiClient) + + ####################################### + ####################################### + # STEP 1: Create account, VM, and vol # + ####################################### + ####################################### + + test_account = Account.create( + self.apiClient, + self.testdata[TestData.testAccount], + admin=1 + ) + + self.cleanup.append(test_account) + + test_virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine2], + accountid=test_account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + test_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_2], + zoneid=self.zone.id, + account=test_account.name, + domainid=self.domain.id, + diskofferingid=self.disk_offering.id + ) + + self._check_and_get_cs_volume(test_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + ####################################### + ####################################### + # STEP 2: Attach volumes to VMs # + ####################################### + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + 
self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + test_volume = test_virtual_machine.attach_volume( + self.apiClient, + test_volume + ) + + test_vol = self._check_and_get_cs_volume(test_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + test_vm = self._get_vm(test_virtual_machine.id) + + self.assertEqual( + test_vol.virtualmachineid, + test_vm.id, + "The volume is not attached to the VM in the other account." + ) + + self.assertEqual( + test_vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + sf_vag_id = self._get_vag_id() + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + sf_volume_size = self._get_volume_size_with_hsr(vol) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + sf_iscsi_name = self._get_iqn(self.volume) + + self._check_xen_sr(sf_iscsi_name) + + self._check_vag(sf_volume, sf_vag_id) + + sf_test_account_id = self._get_sf_account_id(self.primary_storage.id, test_account.id) + + sf_test_volumes = self._get_sf_volumes(sf_test_account_id) + + sf_test_volume = self._check_and_get_sf_volume(sf_test_volumes, test_vol.name) + + sf_test_volume_size = self._get_volume_size_with_hsr(test_vol) + + self._check_size_and_iops(sf_test_volume, test_vol, sf_test_volume_size) + + sf_test_iscsi_name = self._get_iqn(test_volume) + + self._check_xen_sr(sf_test_iscsi_name) + + self._check_vag(sf_test_volume, sf_vag_id) + + @attr(hypervisor='XenServer') + def test_10_attach_more_than_one_disk_to_VM(self): + '''Attach more than one disk to a VM''' + + self.virtual_machine.start(self.apiClient) + + volume_2 = Volume.create( + self.apiClient, + self.testdata[TestData.volume_2], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.domain.id, + diskofferingid=self.disk_offering.id + ) + + self.cleanup.append(volume_2) + + self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName]) + + ####################################### + ####################################### + # Step 1: Attach volumes to VM # + ####################################### + ####################################### + + self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + self.virtual_machine.attach_volume( + self.apiClient, + volume_2 + ) + + vol_2 = self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName]) + + sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id) + + sf_volume_size = self._get_volume_size_with_hsr(self.volume) + + sf_volume_2_size = self._get_volume_size_with_hsr(volume_2) + + sf_vag_id = self._get_vag_id() + + sf_volumes = self._get_sf_volumes(sf_account_id) + + sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name) + + self._check_size_and_iops(sf_volume, vol, sf_volume_size) + + sf_iscsi_name = self._get_iqn(self.volume) + + self._check_xen_sr(sf_iscsi_name) + + self._check_vag(sf_volume, sf_vag_id) + + sf_volume_2 = 
self._check_and_get_sf_volume(sf_volumes, vol_2.name) + + self._check_size_and_iops(sf_volume_2, vol_2, sf_volume_2_size) + + sf_iscsi_name_2 = self._get_iqn(volume_2) + + self._check_xen_sr(sf_iscsi_name_2) + + self._check_vag(sf_volume_2, sf_vag_id) + + self.virtual_machine.detach_volume(self.apiClient, volume_2) + + # TODO: Enable this test once changing the IOPS of an attached disk is supported. + # @attr(hypervisor='XenServer') + # def test_11_attach_disk_to_running_vm_change_iops(self): + # '''Attach a disk to a running VM, then change its IOPS''' + # self.custom_iops_disk_offering = DiskOffering.create( + # ... + # ) + + def _check_list(self, in_list, expected_size_of_list, err_msg): + self.assertEqual( + isinstance(in_list, list), + True, + "'in_list' is not a list." + ) + + self.assertEqual( + len(in_list), + expected_size_of_list, + err_msg + ) + + def _check_iscsi_name(self, sf_iscsi_name): + self.assertEqual( + sf_iscsi_name[0], + "/", + "The iSCSI name needs to start with a forward slash." + ) + + def _check_volume(self, volume, volume_name): + self.assertTrue( + volume.name.startswith(volume_name), + "The volume name is incorrect." + ) + + self.assertEqual( + volume.diskofferingid, + self.disk_offering.id, + "The disk offering is incorrect." + ) + + self.assertEqual( + volume.zoneid, + self.zone.id, + "The zone is incorrect." + ) + + self.assertEqual( + volume.storagetype, + self.disk_offering.storagetype, + "The storage type is incorrect." + ) + + def _check_size_and_iops(self, sf_volume, volume, size): + self.assertEqual( + sf_volume['qos']['minIOPS'], + volume.miniops, + "Check QOS - Min IOPS: " + str(sf_volume['qos']['minIOPS']) + ) + + self.assertEqual( + sf_volume['qos']['maxIOPS'], + volume.maxiops, + "Check QOS - Max IOPS: " + str(sf_volume['qos']['maxIOPS']) + ) + + self.assertEqual( + sf_volume['totalSize'], + size, + "Check SF volume size: " + str(sf_volume['totalSize']) + ) + + def _check_vag(self, sf_volume, sf_vag_id): + self.assertEqual( + len(sf_volume['volumeAccessGroups']), + 1, + "The volume should only be in one VAG." + ) + + self.assertEqual( + sf_volume['volumeAccessGroups'][0], + sf_vag_id, + "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "." + ) + + def _check_and_get_cs_volume(self, volume_id, volume_name): + list_volumes_response = list_volumes( + self.apiClient, + id=volume_id + ) + + self._check_list(list_volumes_response, 1, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + cs_volume = list_volumes_response[0] + + self._check_volume(cs_volume, volume_name) + + return cs_volume + + def _get_sf_account_id(self, primary_storage_id, account_id): + sf_account_id_request = {'storageid': primary_storage_id, 'accountid': account_id} + sf_account_id_result = self.cs_api.getSolidFireAccountId(sf_account_id_request) + sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId'] + + self.assertEqual( + isinstance(sf_account_id, int), + True, + TestVolumes._sf_account_id_should_be_non_zero_int_err_msg + ) + + return sf_account_id + + def _get_volume_size_with_hsr(self, cs_volume): + # Get underlying SF volume size with hypervisor snapshot reserve + sf_volume_size_request = {'volumeid': cs_volume.id} + sf_volume_size_result = self.cs_api.getSolidFireVolumeSize(sf_volume_size_request) + sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize'] + + self.assertEqual( + isinstance(sf_volume_size, int), + True, + TestVolumes._volume_size_should_be_non_zero_int_err_msg
+ ) + + return sf_volume_size + + def _get_vag_id(self): + # Get SF Volume Access Group ID + sf_vag_id_request = {'clusterid': self.cluster.id, 'storageid': self.primary_storage.id} + sf_vag_id_result = self.cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request) + sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId'] + + self.assertEqual( + isinstance(sf_vag_id, int), + True, + TestVolumes._vag_id_should_be_non_zero_int_err_msg + ) + + return sf_vag_id + + def _get_iqn(self, volume): + # Get volume IQN + sf_iscsi_name_request = {'volumeid': volume.id} + sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(sf_iscsi_name_request) + sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName'] + + self._check_iscsi_name(sf_iscsi_name) + + return sf_iscsi_name + + def _get_vm(self, vm_id): + list_vms_response = list_virtual_machines(self.apiClient, id=vm_id) + + self._check_list(list_vms_response, 1, TestVolumes._should_only_be_one_vm_in_list_err_msg) + + return list_vms_response[0] + + def _check_and_get_sf_volume(self, sf_volumes, sf_volume_name, should_exist=True): + sf_volume = None + + for volume in sf_volumes: + if volume['name'] == sf_volume_name: + sf_volume = volume + break + + if should_exist: + self.assertNotEqual( + sf_volume, + None, + "The SolidFire volume could not be found in the expected account: " + str(sf_volumes) + ) + else: + self.assertEqual( + sf_volume, + None, + "The SolidFire volume should have been deleted: " + str(sf_volumes) + ) + + return sf_volume + + def _check_xen_sr(self, xen_sr_name, should_exist=True): + if should_exist: + xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name)[0] + + sr_shared = self.xen_session.xenapi.SR.get_shared(xen_sr) + + self.assertEqual( + sr_shared, + True, + TestVolumes._sr_not_shared_err_msg + ) + else: + xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name) + + self._check_list(xen_sr, 0, TestVolumes._list_should_be_empty) + + def _get_sf_volumes(self, sf_account_id=None): + if sf_account_id is not None: + sf_volumes = self.sf_client.list_volumes_for_account(sf_account_id) + else: + sf_volumes = self.sf_client.list_active_volumes() + + self.assertNotEqual( + len(sf_volumes), + 0, + TestVolumes._volume_response_should_not_be_zero_err_msg + ) + + return sf_volumes + + @classmethod + def _set_supports_resign(cls): + supports_resign = str(cls.supports_resign) + + sql_query = "UPDATE host_details SET value = '" + supports_resign + "' WHERE name = 'supportsResign'" + + # Make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + cls.dbConnection.execute(sql_query) + + @classmethod + def _purge_solidfire_volumes(cls): + deleted_volumes = cls.sf_client.list_deleted_volumes() + + for deleted_volume in deleted_volumes: + cls.sf_client.purge_deleted_volume(deleted_volume['volumeID']) + diff --git a/ui/scripts/system.js b/ui/scripts/system.js index 4cb8094add3..2f6397596ed 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -16336,8 +16336,11 @@ } }); - if (args.context.hosts[0].hypervisor == "XenServer"){ - cloudStack.dialog.notice({ message: _s("The host has been deleted. Please eject the host from XenServer Pool") }) + if (args.context.hosts[0].hypervisor == "XenServer") { + cloudStack.dialog.notice({ message: _s("The host has been removed. 
Please eject the host from the XenServer Resource Pool.") }) + } + else if (args.context.hosts[0].hypervisor == "VMware") { + cloudStack.dialog.notice({ message: _s("The host has been removed. Please eject the host from the vSphere Cluster.") }) } } });