diff --git a/api/src/test/java/com/cloud/agent/api/storage/OVFHelperTest.java b/api/src/test/java/com/cloud/agent/api/storage/OVFHelperTest.java
index ec0c10af14a..f52d3045e40 100644
--- a/api/src/test/java/com/cloud/agent/api/storage/OVFHelperTest.java
+++ b/api/src/test/java/com/cloud/agent/api/storage/OVFHelperTest.java
@@ -521,7 +521,7 @@ public class OVFHelperTest {
" \n" +
" \n" +
" \n" +
- " A comma-seperated list of hostnames or IP addresses of NTP Servers\n" +
+ " A comma-separated list of hostnames or IP addresses of NTP Servers\n" +
" \n" +
" \n" +
" \n" +
diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java
index 0d14882fa29..1aa2c0b203e 100644
--- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java
@@ -72,6 +72,6 @@ public class ServiceOfferingDetailsDaoImpl extends ResourceDetailsDaoBase findOfferingIdsByDomainIds(List domainIds) {
Object[] dIds = domainIds.stream().map(s -> String.valueOf(s)).collect(Collectors.toList()).toArray();
- return findResouceIdsByNameAndValueIn("domainid", dIds);
+ return findResourceIdsByNameAndValueIn("domainid", dIds);
}
}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224.java
index 94bc4c78528..51a929d0377 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224.java
@@ -89,7 +89,7 @@ public class Upgrade222to224 implements DbUpgrade {
upgradeGuestOs(conn);
fixRecreatableVolumesProblem(conn);
updateFkeysAndIndexes(conn);
- fixIPResouceCount(conn);
+ fixIPResourceCount(conn);
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to perform data migration", e);
}
@@ -521,7 +521,7 @@ public class Upgrade222to224 implements DbUpgrade {
// In 2.2.x there was a bug when resource_count was incremented when Direct ip was allocated. Have to fix it during the
// upgrade
- private void fixIPResouceCount(Connection conn) throws SQLException {
+ private void fixIPResourceCount(Connection conn) throws SQLException {
// First set all public_ip fields to be 0
PreparedStatement pstmt = conn.prepareStatement("UPDATE resource_count set count=0 where type='public_ip'");
pstmt.executeUpdate();
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java
index bde71b350ac..5a173191be1 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java
@@ -96,5 +96,5 @@ public interface ResourceDetailsDao extends GenericDao
public void addDetail(long resourceId, String key, String value, boolean display);
- public List<Long> findResouceIdsByNameAndValueIn(String name, Object[] values);
+ public List<Long> findResourceIdsByNameAndValueIn(String name, Object[] values);
}
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java
index b33a1fc2599..37ebfebf5dd 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java
@@ -186,7 +186,7 @@ public abstract class ResourceDetailsDaoBase extends G
}
@Override
- public List<Long> findResouceIdsByNameAndValueIn(String name, Object[] values) {
+ public List<Long> findResourceIdsByNameAndValueIn(String name, Object[] values) {
GenericSearchBuilder<R, Long> sb = createSearchBuilder(Long.class);
sb.selectFields(sb.entity().getResourceId());
sb.and("name", sb.entity().getName(), Op.EQ);
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDaoImpl.java
index 4d29dda560f..756d8c28f0b 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDaoImpl.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDaoImpl.java
@@ -71,7 +71,7 @@ public class DiskOfferingDetailsDaoImpl extends ResourceDetailsDaoBase findOfferingIdsByDomainIds(List domainIds) {
Object[] dIds = domainIds.stream().map(s -> String.valueOf(s)).collect(Collectors.toList()).toArray();
- return findResouceIdsByNameAndValueIn("domainid", dIds);
+ return findResourceIdsByNameAndValueIn("domainid", dIds);
}
}
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
index 96424f206df..3dd28f58d17 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
@@ -2021,7 +2021,7 @@ CREATE TABLE `cloud`.`network_acl_item` (
`icmp_code` int(10) COMMENT 'The ICMP code (if protocol=ICMP). A value of -1 means all codes for the given ICMP type.',
`icmp_type` int(10) COMMENT 'The ICMP type (if protocol=ICMP). A value of -1 means all types.',
`traffic_type` char(32) COMMENT 'the traffic type of the rule, can be Ingress or Egress',
- `cidr` varchar(255) COMMENT 'comma seperated cidr list',
+ `cidr` varchar(255) COMMENT 'comma separated cidr list',
`number` int(10) NOT NULL COMMENT 'priority number of the acl item',
`action` varchar(10) NOT NULL COMMENT 'rule action, allow or deny',
PRIMARY KEY (`id`),
diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java
index 17ff7153fcf..873da13ea3c 100644
--- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java
+++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java
@@ -251,7 +251,7 @@ public class Ovm3StoragePool {
}
/**
- * Adding members to a pool, this is seperate from cluster configuration in
+ * Adding members to a pool, this is separate from cluster configuration in
* OVM.
*
* @return
diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/ConfigureSimulatorCmd.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/ConfigureSimulatorCmd.java
index b891c6cbae5..2aa666a69fc 100644
--- a/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/ConfigureSimulatorCmd.java
+++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/ConfigureSimulatorCmd.java
@@ -63,7 +63,7 @@ public class ConfigureSimulatorCmd extends BaseCmd {
@Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "which command needs to be configured")
private String command;
- @Parameter(name = ApiConstants.VALUE, type = CommandType.STRING, required = true, description = "configuration options for this command, which is seperated by ;")
+ @Parameter(name = ApiConstants.VALUE, type = CommandType.STRING, required = true, description = "configuration options for this command, which is separated by ;")
private String values;
@Parameter(name=ApiConstants.COUNT, type=CommandType.INTEGER, description="number of times the mock is active")
diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManager.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManager.java
index c73f191f42a..c2cdbccdae7 100644
--- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManager.java
+++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManager.java
@@ -41,7 +41,7 @@ public interface VmwareManager {
"If a worker vm is older then twice the 'job.expire.minutes' + 'job.cancel.threshold.minutes' , remove it.", true, ConfigKey.Scope.Global);
static final ConfigKey<String> s_vmwareSearchExcludeFolder = new ConfigKey<String>("Advanced", String.class, "vmware.search.exclude.folders", null,
- "Comma seperated list of Datastore Folders to exclude from VMWare search", true, ConfigKey.Scope.Global);
+ "Comma separated list of Datastore Folders to exclude from VMWare search", true, ConfigKey.Scope.Global);
static final ConfigKey<Integer> s_vmwareOVAPackageTimeout = new ConfigKey<Integer>(Integer.class, "vmware.package.ova.timeout", "Advanced", "3600",
"Vmware script timeout for ova packaging process", true, ConfigKey.Scope.Global, 1000);
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateVMSnapshotCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateVMSnapshotCommandWrapper.java
index 6bd843dadf0..e40f787c4ca 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateVMSnapshotCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateVMSnapshotCommandWrapper.java
@@ -169,7 +169,7 @@ public final class CitrixCreateVMSnapshotCommandWrapper extends CommandWrapper vbds = vmSnapshot.getVBDs(conn);
for (final VBD vbd : vbds) {
final VBD.Record vbdr = vbd.getRecord(conn);
diff --git a/plugins/network-elements/dns-notifier/src/main/resources/components-example.xml b/plugins/network-elements/dns-notifier/src/main/resources/components-example.xml
index 9d1b1200776..c53c0b14ef1 100755
--- a/plugins/network-elements/dns-notifier/src/main/resources/components-example.xml
+++ b/plugins/network-elements/dns-notifier/src/main/resources/components-example.xml
@@ -179,7 +179,7 @@ under the License.
-
+
diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/DBSyncGeneric.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/DBSyncGeneric.java
index 3842caaa2d8..fdfd9df2a43 100644
--- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/DBSyncGeneric.java
+++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/DBSyncGeneric.java
@@ -50,7 +50,7 @@ public class DBSyncGeneric {
private final String dbComparatorMethodPrefix = "dbComparator";
private final String vncComparatorMethodPrefix = "vncComparator";
- /* sync methods implementation object, if implemented in seperate class
+ /* sync methods implementation object, if implemented in separate class
* set the scope object
*/
private Object _scope;
diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraRestClient.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraRestClient.java
index fb66066893c..73d08a0f1cb 100644
--- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraRestClient.java
+++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraRestClient.java
@@ -138,7 +138,7 @@ public class NiciraRestClient extends BasicRestClient {
final String respobnseBody = EntityUtils.toString(entity);
errorMessage = respobnseBody.subSequence(0, maxResponseErrorMesageLength).toString();
} catch (final IOException e) {
- s_logger.debug("Could not read repsonse body. Response: " + response, e);
+ s_logger.debug("Could not read response body. Response: " + response, e);
}
}
diff --git a/scripts/installer/createtmplt.sh b/scripts/installer/createtmplt.sh
index c187c5fcb12..2d164304604 100755
--- a/scripts/installer/createtmplt.sh
+++ b/scripts/installer/createtmplt.sh
@@ -47,7 +47,7 @@ verify_cksum() {
64) digestalgo="sha256sum" ;;
96) digestalgo="sha384sum" ;;
128) digestalgo="sha512sum" ;;
- *) echo "Please provide valid cheksum" ; exit 3 ;;
+ *) echo "Please provide valid checksum" ; exit 3 ;;
esac
echo "$1 $2" | $digestalgo -c --status
#printf "$1\t$2" | $digestalgo -c --status
diff --git a/scripts/installer/createvolume.sh b/scripts/installer/createvolume.sh
index c7f11dc237c..15847c52375 100755
--- a/scripts/installer/createvolume.sh
+++ b/scripts/installer/createvolume.sh
@@ -48,7 +48,7 @@ verify_cksum() {
64) digestalgo="sha256sum" ;;
96) digestalgo="sha384sum" ;;
128) digestalgo="sha512sum" ;;
- *) echo "Please provide valid cheksum" ; exit 3 ;;
+ *) echo "Please provide valid checksum" ; exit 3 ;;
esac
echo "$1 $2" | $digestalgo -c --status
#printf "$1\t$2" | $digestalgo -c --status
diff --git a/scripts/storage/secondary/createvolume.sh b/scripts/storage/secondary/createvolume.sh
index 14bbee4d4df..91370dff710 100755
--- a/scripts/storage/secondary/createvolume.sh
+++ b/scripts/storage/secondary/createvolume.sh
@@ -49,7 +49,7 @@ verify_cksum() {
64) digestalgo="sha256sum" ;;
96) digestalgo="sha384sum" ;;
128) digestalgo="sha512sum" ;;
- *) echo "Please provide valid cheksum" ; exit 3 ;;
+ *) echo "Please provide valid checksum" ; exit 3 ;;
esac
echo "$1 $2" | $digestalgo -c --status
#printf "$1\t$2" | $digestalgo -c --status
diff --git a/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh b/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh
index d39fc6e9ab8..447f5f8dd51 100755
--- a/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh
+++ b/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh
@@ -67,7 +67,7 @@ fi
snapshotdir=/var/run/cloud_mount/$(uuidgen -r)
mkdir -p $snapshotdir
if [ $? -ne 0 ]; then
- echo "4#cann't make dir $snapshotdir"
+ echo "4#can't make dir $snapshotdir"
exit 0
fi
@@ -83,7 +83,7 @@ mkdir -p $templatedir
if [ $? -ne 0 ]; then
templatedir=""
cleanup
- echo "6#cann't make dir $templatedir"
+ echo "6#can't make dir $templatedir"
exit 0
fi
diff --git a/scripts/vm/hypervisor/xenserver/upgrade_snapshot.sh b/scripts/vm/hypervisor/xenserver/upgrade_snapshot.sh
index 4cb2e3053f5..3813fd78617 100755
--- a/scripts/vm/hypervisor/xenserver/upgrade_snapshot.sh
+++ b/scripts/vm/hypervisor/xenserver/upgrade_snapshot.sh
@@ -58,7 +58,7 @@ fi
snapshotdir=/var/run/cloud_mount/$(uuidgen -r)
mkdir -p $snapshotdir
if [ $? -ne 0 ]; then
- echo "4#cann't make dir $snapshotdir"
+ echo "4#can't make dir $snapshotdir"
exit 0
fi
@@ -74,7 +74,7 @@ mkdir -p $templatedir
if [ $? -ne 0 ]; then
templatedir=""
cleanup
- echo "6#cann't make dir $templatedir"
+ echo "6#can't make dir $templatedir"
exit 0
fi
diff --git a/scripts/vm/hypervisor/xenserver/vmops b/scripts/vm/hypervisor/xenserver/vmops
index f1e3b288db6..0d82a9d2116 100755
--- a/scripts/vm/hypervisor/xenserver/vmops
+++ b/scripts/vm/hypervisor/xenserver/vmops
@@ -1535,7 +1535,7 @@ def network_rules(session, args):
logging.debug("port range [%s]" % port_range)
if cidrs:
- #create seperate ipset name
+ #create separate ipset name
ipsetname = ipset_chain + "" + protocol[0:1] + "" + start + "_" + end
if start == "-1":
ipsetname = ipset_chain + "_" + protocol[0:1] + "_any"
diff --git a/scripts/vm/network/security_group.py b/scripts/vm/network/security_group.py
index 1147f5b6ec9..f3d02e7dd78 100755
--- a/scripts/vm/network/security_group.py
+++ b/scripts/vm/network/security_group.py
@@ -1389,7 +1389,7 @@ def verify_network_rules(vm_name, vm_id, vm_ip, vm_ip6, vm_mac, vif, brname, sec
#vm_mac = "1e:00:b4:00:00:05"
#vif = "vnet11"
#brname = "cloudbr0"
- #sec_ips = "10.11.118.133;10.11.118.135;10.11.118.138;" # end with ";" and seperated by ";"
+ #sec_ips = "10.11.118.133;10.11.118.135;10.11.118.138;" # end with ";" and separated by ";"
vm_ips = []
if sec_ips is not None:
diff --git a/server/conf/migration-components.xml b/server/conf/migration-components.xml
index 2ba35c836c2..6b107dfc31b 100644
--- a/server/conf/migration-components.xml
+++ b/server/conf/migration-components.xml
@@ -39,7 +39,7 @@ under the License.
-
+
diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java
index fefe6c98cc6..a4aadaa2831 100644
--- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java
+++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java
@@ -3786,7 +3786,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
vnetsInDb.addAll(tempVnets);
}
- //sorting the vnets in Db to generate a coma seperated list of the vnet string.
+ //sorting the vnets in Db to generate a coma separated list of the vnet string.
if (vnetsInDb.size() != 0) {
comaSeperatedStingOfVnetRanges = generateVnetString(new ArrayList(vnetsInDb));
}
@@ -3859,7 +3859,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
return vlanTokens;
}
if (VnetRange.length < 2) {
- throw new InvalidParameterValueException("Please provide valid vnet range. vnet range should be a coma seperated list of vlan ranges. example 500-500,600-601" + rangeMessage);
+ throw new InvalidParameterValueException("Please provide valid vnet range. vnet range should be a coma separated list of vlan ranges. example 500-500,600-601" + rangeMessage);
}
if (VnetRange[0] == null || VnetRange[1] == null) {
@@ -3871,7 +3871,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
EndVnet = Integer.parseInt(VnetRange[1]);
} catch (NumberFormatException e) {
s_logger.warn("Unable to parse vnet range:", e);
- throw new InvalidParameterValueException("Please provide valid vnet range. The vnet range should be a coma seperated list example 2001-2012,3000-3005." + rangeMessage);
+ throw new InvalidParameterValueException("Please provide valid vnet range. The vnet range should be a coma separated list example 2001-2012,3000-3005." + rangeMessage);
}
if (StartVnet < minVnet || EndVnet > maxVnet) {
throw new InvalidParameterValueException("Vnet range has to be" + rangeMessage);
diff --git a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
index 3993f52247c..8d3bf1f2ca6 100644
--- a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
+++ b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
@@ -410,7 +410,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
try {
success = applyAutoScaleConfig(loadBalancer, vmGroup, currentState);
} catch (ResourceUnavailableException e) {
- s_logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavaliable:", e);
+ s_logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavailable:", e);
if (isRollBackAllowedForProvider(loadBalancer)) {
loadBalancer.setState(backupState);
_lbDao.persist(loadBalancer);
@@ -685,7 +685,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
try {
applyLoadBalancerConfig(cmd.getLbRuleId());
} catch (ResourceUnavailableException e) {
- s_logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavaliable:", e);
+ s_logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e);
if (isRollBackAllowedForProvider(loadBalancer)) {
loadBalancer.setState(backupState);
_lbDao.persist(loadBalancer);
@@ -731,7 +731,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
try {
applyLoadBalancerConfig(cmd.getLbRuleId());
} catch (ResourceUnavailableException e) {
- s_logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavaliable:", e);
+ s_logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e);
if (isRollBackAllowedForProvider(loadBalancer)) {
loadBalancer.setState(backupState);
_lbDao.persist(loadBalancer);
@@ -786,7 +786,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
_lbDao.persist(loadBalancer);
s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId);
}
- s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e);
+ s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e);
success = false;
}
} else {
@@ -854,7 +854,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
_lbDao.persist(loadBalancer);
s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting healthcheck policy: " + healthCheckPolicyId);
}
- s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e);
+ s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e);
success = false;
}
} else {
@@ -1124,7 +1124,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
applyLoadBalancerConfig(loadBalancerId);
success = true;
} catch (ResourceUnavailableException e) {
- s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e);
+ s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e);
success = false;
} finally {
if (!success) {
@@ -1243,7 +1243,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
_lbCertMapDao.remove(certMap.getId());
s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while adding cert");
}
- s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e);
+ s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e);
}
return success;
}
@@ -1291,7 +1291,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
_lbDao.persist(loadBalancer);
s_logger.debug("Rolled back certificate removal lb id " + lbRuleId);
}
- s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e);
+ s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e);
if (!success) {
CloudRuntimeException ex = new CloudRuntimeException("Failed to remove certificate from load balancer rule id " + lbRuleId);
ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId");
@@ -1414,7 +1414,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
_lbDao.persist(loadBalancer);
s_logger.debug("LB Rollback rule id: " + loadBalancerId + " while removing vm instances");
}
- s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e);
+ s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e);
}
if (!success) {
CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + vmIds);
@@ -1562,7 +1562,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
_lbDao.persist(lb);
s_logger.debug("LB Rollback rule id: " + loadBalancerId + " while deleting LB rule.");
} else {
- s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e);
+ s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e);
}
return false;
}
@@ -2216,7 +2216,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements
s_logger.debug("LB Rollback rule id: " + lbRuleId + " while updating LB rule.");
}
- s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e);
+ s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e);
success = false;
}
}
diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java
index 4bd31457861..b50e345c637 100644
--- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java
+++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java
@@ -375,14 +375,14 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
pstmt = txn.prepareAutoCloseStatement(sql);
rs1 = pstmt.executeQuery();
while (rs1.next()) {
- String resouce = rs1.getString(1); //resource column
- if (resouce == null)
+ String resource = rs1.getString(1); //resource column
+ if (resource == null)
continue;
- if (resouce.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer56Resource")
- || resouce.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer56FP1Resource")
- || resouce.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer56SP2Resource")
- || resouce.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer600Resource")
- || resouce.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer602Resource")) {
+ if (resource.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer56Resource")
+ || resource.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer56FP1Resource")
+ || resource.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer56SP2Resource")
+ || resource.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer600Resource")
+ || resource.equalsIgnoreCase("com.cloud.hypervisor.xenserver.resource.XenServer602Resource")) {
pvdriverversion = "xenserver56";
break;
}
diff --git a/server/src/main/java/com/cloud/test/DatabaseConfig.java b/server/src/main/java/com/cloud/test/DatabaseConfig.java
index 5f475b673ce..15525446e1e 100644
--- a/server/src/main/java/com/cloud/test/DatabaseConfig.java
+++ b/server/src/main/java/com/cloud/test/DatabaseConfig.java
@@ -198,7 +198,7 @@ public class DatabaseConfig {
"The time interval(in millisecond) to scan whether or not system needs more console proxy to ensure minimal standby capacity");
s_configurationDescriptions.put("consoleproxy.capacity.standby",
"The minimal number of console proxy viewer sessions that system is able to serve immediately(standby capacity)");
- s_configurationDescriptions.put("alert.email.addresses", "comma seperated list of email addresses used for sending alerts");
+ s_configurationDescriptions.put("alert.email.addresses", "comma separated list of email addresses used for sending alerts");
s_configurationDescriptions.put("alert.smtp.host", "SMTP hostname used for sending out email alerts");
s_configurationDescriptions.put("alert.smtp.port", "port the SMTP server is listening on (default is 25)");
s_configurationDescriptions.put("alert.smtp.useAuth",
diff --git a/test/integration/component/maint/test_hypervisor_limit.py b/test/integration/component/maint/test_hypervisor_limit.py
index d24540a43de..d55c46f4be2 100644
--- a/test/integration/component/maint/test_hypervisor_limit.py
+++ b/test/integration/component/maint/test_hypervisor_limit.py
@@ -127,7 +127,7 @@ class TestMaxHyperviosrLimit(cloudstackTestCase):
def test_check_hypervisor_max_limit_effect(self):
""" Test hypervisor max limits effect
- # 1. Read exsiting count of VM's on the host including SSVM and VR
+ # 1. Read existing count of VM's on the host including SSVM and VR
and modify maxguestcount accordingly
# 2. Deploy a VM
# 2. Try to deploy another vm
diff --git a/test/integration/component/maint/test_ip_resource_count_for_vpc.py b/test/integration/component/maint/test_ip_resource_count_for_vpc.py
index a7dfa25d313..2099995f505 100644
--- a/test/integration/component/maint/test_ip_resource_count_for_vpc.py
+++ b/test/integration/component/maint/test_ip_resource_count_for_vpc.py
@@ -264,7 +264,7 @@ class TestIPResourceCountVPC(cloudstackTestCase):
cmd.account=self.account.name
cmd.domainid=self.domain.id
- responce=self.apiclient.updateResourceCount(cmd)
+ response=self.apiclient.updateResourceCount(cmd)
def acquire_publicip(self, network, vpc):
self.debug("Associating public IP for network: %s" % network.name)
@@ -279,7 +279,7 @@ class TestIPResourceCountVPC(cloudstackTestCase):
return public_ip
@attr(tags=["advanced", "intervlan"], required_hardware="false")
- def test_01_ip_resouce_count_vpc_network(self):
+ def test_01_ip_resource_count_vpc_network(self):
""" Test IP count in VPC networks
"""
self.debug("Creating a VPC offering..")
diff --git a/test/integration/component/maint/test_redundant_router.py b/test/integration/component/maint/test_redundant_router.py
index 2518704e635..23971a2f795 100644
--- a/test/integration/component/maint/test_redundant_router.py
+++ b/test/integration/component/maint/test_redundant_router.py
@@ -1564,7 +1564,7 @@ class TestRvRRedundancy(cloudstackTestCase):
return primary_router, backup_router
- def chek_for_new_backupRouter(self,old_backup_router):
+ def check_for_new_backupRouter(self,old_backup_router):
primary_router, backup_router = self.get_primary_and_backupRouter()
retry = 4
self.info("Checking if new router is getting created.")
@@ -1613,7 +1613,7 @@ class TestRvRRedundancy(cloudstackTestCase):
self.wait_until_router_stabilises()
old_primary_router, old_backup_router = self.get_primary_and_backupRouter()
self.info("old_primary_router:"+old_primary_router.name+" old_backup_router"+old_backup_router.name)
- #chek if the network is in correct state
+ #check if the network is in correct state
self.assertEqual(old_primary_router.state, "Running", "The primary router is not running, network is not in a correct state to start the test")
self.assertEqual(old_backup_router.state, "Running", "The backup router is not running, network is not in a correct state to start the test")
@@ -1627,7 +1627,7 @@ class TestRvRRedundancy(cloudstackTestCase):
self.info("Network update Started, the old backup router will get destroyed and a new router will be created")
- self.chek_for_new_backupRouter(old_backup_router)
+ self.check_for_new_backupRouter(old_backup_router)
primary_router, new_backup_router=self.get_primary_and_backupRouter()
#the state of the primary router should be running. while backup is being updated
self.assertEqual(primary_router.state, "Running", "State of the primary router is not running")
diff --git a/test/integration/component/maint/test_redundant_router_deployment_planning.py b/test/integration/component/maint/test_redundant_router_deployment_planning.py
index 67d5cbdbd13..5012fd1f2e9 100644
--- a/test/integration/component/maint/test_redundant_router_deployment_planning.py
+++ b/test/integration/component/maint/test_redundant_router_deployment_planning.py
@@ -367,7 +367,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
)
enabled_pod = pods[0]
- self.debug("Cheking if pod has at least 2 clusters")
+ self.debug("Checking if pod has at least 2 clusters")
clusters = Cluster.list(
self.apiclient,
podid=enabled_pod.id,
@@ -574,7 +574,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
)
enabled_pod = pods[0]
- self.debug("Cheking if pod has multiple clusters")
+ self.debug("Checking if pod has multiple clusters")
clusters = Cluster.list(
self.apiclient,
podid=enabled_pod.id,
@@ -588,7 +588,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
enabled_cluster = clusters[0]
- self.debug("Cheking if cluster has multiple storage pools")
+ self.debug("Checking if cluster has multiple storage pools")
storage_pools = StoragePool.list(
self.apiclient,
clusterid=enabled_cluster.id,
@@ -812,7 +812,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
)
enabled_pod = pods[0]
- self.debug("Cheking if pod has multiple clusters")
+ self.debug("Checking if pod has multiple clusters")
clusters = Cluster.list(
self.apiclient,
podid=enabled_pod.id,
@@ -826,7 +826,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
enabled_cluster = clusters[0]
- self.debug("Cheking if cluster has multiple hosts")
+ self.debug("Checking if cluster has multiple hosts")
hosts = Host.list(
self.apiclient,
clusterid=enabled_cluster.id,
diff --git a/test/integration/component/maint/testpath_disable_enable_zone.py b/test/integration/component/maint/testpath_disable_enable_zone.py
index 1537fc5457a..216161f1c6b 100644
--- a/test/integration/component/maint/testpath_disable_enable_zone.py
+++ b/test/integration/component/maint/testpath_disable_enable_zone.py
@@ -163,11 +163,11 @@ class TestDisableEnableZone(cloudstackTestCase):
"""disable enable zone
1. Disable zone and verify following things:
For admin user:
- 1. Should be create to start/stop exsiting vms
+ 1. Should be create to start/stop existing vms
2. Should be create to deploy new vm, snapshot,volume,
template,iso in the same zone
For Non-admin user:
- 1. Should be create to start/stop exsiting vms
+ 1. Should be create to start/stop existing vms
2. Should not be create to deploy new vm, snapshot,volume,
template,iso in the same zone
2. Enable the above disabled zone and verify that:
@@ -1472,11 +1472,11 @@ class TestDisableEnableHost(cloudstackTestCase):
"""disable enable host
1. Disable host and verify following things:
For admin user:
- 1. Should be able to stop exsiting vms but can not start.
+ 1. Should be able to stop existing vms but can not start.
2. Should not be able to deploy new vm,
and create snapshot on the same host
For Non-admin user:
- 1. Should not be able to stop exsiting vms but
+ 1. Should not be able to stop existing vms but
cant not start
2. Should not be create to deploy new vm,
snapshot on the same host
@@ -1521,7 +1521,7 @@ class TestDisableEnableHost(cloudstackTestCase):
DISABLED,
"Check if the host is in disabled state"
)
- # Verify the exsisting vms should be running
+ # Verify the existing vms should be running
self.assertEqual(vm_user.state.lower(),
"running",
"Verify that the user vm is running")
diff --git a/test/integration/component/test_advancedsg_networks.py b/test/integration/component/test_advancedsg_networks.py
index 6a885121212..924d9888ad3 100644
--- a/test/integration/component/test_advancedsg_networks.py
+++ b/test/integration/component/test_advancedsg_networks.py
@@ -1403,7 +1403,7 @@ class TestNetworksInAdvancedSG_VmOperations(cloudstackTestCase):
domainid=account_1.domainid,networkids=[shared_network_account_1.id,],
serviceofferingid=self.service_offering.id)
- self.fail("Vm creation succeded, should have failed")
+ self.fail("Vm creation succeeded, should have failed")
except Exception as e:
self.debug("VM creation failed as expected with exception: %s" % e)
@@ -1513,7 +1513,7 @@ class TestNetworksInAdvancedSG_VmOperations(cloudstackTestCase):
templateid=self.template.id,domainid=domain_1.id,
networkids=[shared_network_domain_1.id,],
serviceofferingid=self.service_offering.id)
- self.fail("Vm creation succeded, should have failed")
+ self.fail("Vm creation succeeded, should have failed")
except Exception as e:
self.debug("VM creation failed as expected with exception: %s" % e)
@@ -1577,7 +1577,7 @@ class TestNetworksInAdvancedSG_VmOperations(cloudstackTestCase):
domainid=domain.id,networkids=[shared_network.id,],
serviceofferingid=self.service_offering.id)
self.cleanup_vms.append(vm)
- self.fail("Vm creation succeded, should have failed")
+ self.fail("Vm creation succeeded, should have failed")
except Exception as e:
self.debug("VM creation failed as expected with exception: %s" % e)
@@ -1722,7 +1722,7 @@ class TestNetworksInAdvancedSG_VmOperations(cloudstackTestCase):
templateid=self.template.id, networkids=[shared_network_1.id, shared_network_2.id],
serviceofferingid=self.service_offering.id)
self.cleanup_vms.append(vm)
- self.fail("Vm creation should have failed, it succeded, created vm %s" % vm.id)
+ self.fail("Vm creation should have failed, it succeeded, created vm %s" % vm.id)
except Exception as e:
self.debug("VM creation failed as expected with exception: %s" % e)
@@ -1836,7 +1836,7 @@ class TestNetworksInAdvancedSG_VmOperations(cloudstackTestCase):
templateid=self.template.id,networkids=[shared_network.id,],
serviceofferingid=self.service_offering.id)
self.cleanup_vms.append(vm)
- self.fail("Vm creation succeded, should have failed")
+ self.fail("Vm creation succeeded, should have failed")
except Exception as e:
self.debug("VM creation failed as expected with exception: %s" % e)
diff --git a/test/integration/component/test_browse_templates.py b/test/integration/component/test_browse_templates.py
index ba2a6e16554..a29cb524212 100644
--- a/test/integration/component/test_browse_templates.py
+++ b/test/integration/component/test_browse_templates.py
@@ -222,11 +222,11 @@ class TestBrowseUploadVolume(cloudstackTestCase):
def gettemplatelimts(self):
- totalresoucelist=Account.list(
+ totalresourcelist=Account.list(
self.apiclient,
id=self.account.id
)
- totaltemplates=totalresoucelist[0].templatetotal
+ totaltemplates=totalresourcelist[0].templatetotal
return(totaltemplates)
@@ -255,12 +255,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.ostypeid=self.test_template.ostypeid
#cmd.isdynamicallyscalable="false"
#cmd.type="template"
- getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
+ getuploadparamsresponse=self.apiclient.getUploadParamsForTemplate(cmd)
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
+ signt=getuploadparamsresponse.signature
+ posturl=getuploadparamsresponse.postURL
+ metadata=getuploadparamsresponse.metadata
+ expiredata=getuploadparamsresponse.expires
#url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url=self.test_template.url
@@ -288,9 +288,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code !=200:
self.fail("Upload is not fine")
- self.validate_uploaded_template(self.apiclient, getuploadparamsresponce.id)
+ self.validate_uploaded_template(self.apiclient, getuploadparamsresponse.id)
- return(getuploadparamsresponce)
+ return(getuploadparamsresponse)
def browse_upload_template_with_out_zoneid(self):
@@ -305,7 +305,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
success= False
try:
- getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
+ getuploadparamsresponse=self.apiclient.getUploadParamsForTemplate(cmd)
except Exception as ex:
if "Invalid Parameter" in str(ex):
success = True
@@ -330,7 +330,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
success= False
try:
- getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
+ getuploadparamsresponse=self.apiclient.getUploadParamsForTemplate(cmd)
except Exception as ex:
if "Invalid Parameter" in str(ex):
success = True
@@ -354,12 +354,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.projectid=projectid
#cmd.isdynamicallyscalable="false"
#cmd.type="template"
- getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
+ getuploadparamsresponse=self.apiclient.getUploadParamsForTemplate(cmd)
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
+ signt=getuploadparamsresponse.signature
+ posturl=getuploadparamsresponse.postURL
+ metadata=getuploadparamsresponse.metadata
+ expiredata=getuploadparamsresponse.expires
#url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url=self.test_template.url
@@ -381,9 +381,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code !=200:
self.fail("Upload is not fine")
- self.validate_uploaded_template(self.apiclient, getuploadparamsresponce.id)
+ self.validate_uploaded_template(self.apiclient, getuploadparamsresponse.id)
- return(getuploadparamsresponce)
+ return(getuploadparamsresponse)
def browse_upload_template_multiplezones(self,lzones):
@@ -398,12 +398,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.ostypeid=self.test_template.ostypeid
#cmd.isdynamicallyscalable="false"
#cmd.type="template"
- getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
+ getuploadparamsresponse=self.apiclient.getUploadParamsForTemplate(cmd)
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
+ signt=getuploadparamsresponse.signature
+ posturl=getuploadparamsresponse.postURL
+ metadata=getuploadparamsresponse.metadata
+ expiredata=getuploadparamsresponse.expires
#url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url=self.test_template.url
@@ -432,9 +432,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Upload is not fine")
for z1 in lzones:
- self.validate_uploaded_template(self.apiclient, getuploadparamsresponce.id)
+ self.validate_uploaded_template(self.apiclient, getuploadparamsresponse.id)
- return(getuploadparamsresponce)
+ return(getuploadparamsresponse)
def uploadtemplate(self):
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
@@ -447,12 +447,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.hypervisor=self.test_template.hypervisor
cmd.ostypeid=self.test_template.ostypeid
#cmd.type="template"
- getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
+ getuploadparamsresponse=self.apiclient.getUploadParamsForTemplate(cmd)
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
+ signt=getuploadparamsresponse.signature
+ posturl=getuploadparamsresponse.postURL
+ metadata=getuploadparamsresponse.metadata
+ expiredata=getuploadparamsresponse.expires
#url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url=self.test_template.url
@@ -481,7 +481,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code !=200:
self.fail("Upload is not fine")
- return(getuploadparamsresponce)
+ return(getuploadparamsresponse)
def multiple_browse_upload_template(self):
@@ -1638,7 +1638,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
afteruploadtemplatelimit=self.gettemplatelimts()
if int(afteruploadtemplatelimit)!=(int(initialtemplatelimit)+1):
- self.fail("Volume Resouce Count is not updated")
+ self.fail("Volume Resource Count is not updated")
self.delete_template(browseup_template1)
@@ -1669,7 +1669,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
afteruploadsecondarystoragelimit=self.getstoragelimits(11)
if afteruploadsecondarystoragelimit!=(initialsecondarystoragelimit+tmpldetails[0].size):
- self.fail("Secondary Storage Resouce Count is not updated")
+ self.fail("Secondary Storage Resource Count is not updated")
self.delete_template(browseup_template1)
diff --git a/test/integration/component/test_browse_volumes.py b/test/integration/component/test_browse_volumes.py
index 6100a88c4cf..8d856345fed 100644
--- a/test/integration/component/test_browse_volumes.py
+++ b/test/integration/component/test_browse_volumes.py
@@ -179,12 +179,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.format = self.uploadvolumeformat
cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
cmd.projectid = projectid
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
url = self.uploadurl
@@ -206,14 +206,14 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'Uploaded')
list_volume_response = Volume.list(
self.apiclient,
projectid=projectid
)
- if list_volume_response[0].id == getuploadparamsresponce.id:
- return (getuploadparamsresponce)
+ if list_volume_response[0].id == getuploadparamsresponse.id:
+ return (getuploadparamsresponse)
else:
self.fail("Volume is not listed with projectid")
@@ -260,13 +260,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
cmd.account = self.account.name
cmd.domainid = self.domain.id
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
- self.globalurl = getuploadparamsresponce.postURL
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
+ self.globalurl = getuploadparamsresponse.postURL
# url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url = self.uploadurl
@@ -288,9 +288,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'Uploaded')
- return (getuploadparamsresponce)
+ return (getuploadparamsresponse)
def onlyupload(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
@@ -299,8 +299,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
cmd.account = self.account.name
cmd.domainid = self.domain.id
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
- return (getuploadparamsresponce)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
+ return (getuploadparamsresponse)
def invalidupload(self):
@@ -334,12 +334,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
cmd.account = self.account.name
cmd.domainid = self.domain.id
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
+ signt = getuploadparamsresponse.signature
posturl = "http://invalidposturl/2999834." + self.uploadvolumeformat
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
# url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url = self.uploadurl
@@ -361,7 +361,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'UploadedAbandoned')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'UploadedAbandoned')
except Exception as ex:
if "Max retries exceeded with url" in str(ex):
@@ -372,7 +372,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
True,
"Verify - Tampered Post URL is handled")
- return (getuploadparamsresponce)
+ return (getuploadparamsresponse)
def reuse_url(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
@@ -381,12 +381,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
cmd.account = self.account.name
cmd.domainid = self.domain.id
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
+ signt = getuploadparamsresponse.signature
posturl = self.globalurl
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
url = self.uploadurl
time.sleep(300)
@@ -415,7 +415,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
uploadtimeout = int(config[0].value)
time.sleep(uploadtimeout * 60)
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'UploadAbandoned')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'UploadAbandoned')
return
def validate_storage_cleanup(self, invalidpostvolume, cleanup_interval):
@@ -493,12 +493,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.account = self.account.name
cmd.domainid = self.domain.id
cmd.checksum = self.md5sum
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
# url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url = self.uploadurl
@@ -526,9 +526,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'Uploaded')
- return (getuploadparamsresponce)
+ return (getuploadparamsresponse)
def browse_upload_volume_with_invalid_md5(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
@@ -538,12 +538,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.account = self.account.name
cmd.domainid = self.domain.id
cmd.checksum = "xxxxxxxx"
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
# url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url = self.uploadurl
@@ -572,9 +572,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'Uploaded')
- return (getuploadparamsresponce)
+ return (getuploadparamsresponse)
def validate_vm(self, vmdetails, vmstate):
@@ -1603,12 +1603,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
return
- def uploadvol(self, getuploadparamsresponce):
+ def uploadvol(self, getuploadparamsresponse):
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
success = False
url = self.uploadurl
@@ -1627,7 +1627,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
results = requests.post(posturl, files=files, headers=headers, verify=False)
list_volume_response = Volume.list(
self.apiclient,
- id=getuploadparamsresponce.id
+ id=getuploadparamsresponse.id
)
self.debug("======================Before SSVM Reboot==================")
@@ -1642,21 +1642,21 @@ class TestBrowseUploadVolume(cloudstackTestCase):
uploadtimeout = int(config[0].value)
time.sleep(uploadtimeout * 60)
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'UploadAbandoned')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'UploadAbandoned')
return ()
- def uploadvolwithssvmreboot(self, getuploadparamsresponce):
+ def uploadvolwithssvmreboot(self, getuploadparamsresponse):
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
self.debug("======================Before SSVM Reboot==================")
list_volume_response = Volume.list(
self.apiclient,
- id=getuploadparamsresponce.id
+ id=getuploadparamsresponse.id
)
self.debug(list_volume_response[0])
@@ -1680,13 +1680,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
results = requests.post(posturl, files=files, headers=headers, verify=False)
list_volume_response = Volume.list(
self.apiclient,
- id=getuploadparamsresponce.id
+ id=getuploadparamsresponse.id
)
self.debug("======================Upload After SSVM Reboot==================")
self.debug(list_volume_response[0])
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'Uploaded')
return ()
@@ -1699,13 +1699,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.account = self.account.name
cmd.domainid = self.domain.id
cmd.diskofferingid = self.disk_offering.id
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
- self.globalurl = getuploadparamsresponce.postURL
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
+ self.globalurl = getuploadparamsresponse.postURL
# url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url = self.uploadurl
@@ -1727,7 +1727,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'Uploaded')
def uploadwithimagestoreid(self):
@@ -1742,13 +1742,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.account = self.account.name
cmd.domainid = self.domain.id
cmd.imagestoreuuid = sscmdresponse[0].id
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
- self.globalurl = getuploadparamsresponce.postURL
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
+ self.globalurl = getuploadparamsresponse.postURL
# url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url = self.uploadurl
@@ -1770,7 +1770,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'Uploaded')
def uploadwithsamedisplaytext(self, voldetails):
@@ -1786,10 +1786,10 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.name = list_volume_response[0].name
cmd.account = self.account.name
cmd.domainid = self.domain.id
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
list_volume_response1 = Volume.list(
self.apiclient,
- id=getuploadparamsresponce.id
+ id=getuploadparamsresponse.id
)
if list_volume_response1[0].name == voldetails.name:
success = False
@@ -1849,12 +1849,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.account = self.account.name
cmd.domainid = self.domain.id
cmd.diskofferingid = self.disk_offering.id
- getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ getuploadparamsresponse = self.apiclient.getUploadParamsForVolume(cmd)
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
# url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
url = self.extuploadurl
@@ -1877,18 +1877,18 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'Uploaded')
- def posturlwithdeletedvolume(self, getuploadparamsresponce):
+ def posturlwithdeletedvolume(self, getuploadparamsresponse):
- signt = getuploadparamsresponce.signature
- posturl = getuploadparamsresponce.postURL
- metadata = getuploadparamsresponce.metadata
- expiredata = getuploadparamsresponce.expires
- self.validate_uploaded_volume(getuploadparamsresponce.id, 'UploadAbandoned')
+ signt = getuploadparamsresponse.signature
+ posturl = getuploadparamsresponse.postURL
+ metadata = getuploadparamsresponse.metadata
+ expiredata = getuploadparamsresponse.expires
+ self.validate_uploaded_volume(getuploadparamsresponse.id, 'UploadAbandoned')
cmd = deleteVolume.deleteVolumeCmd()
- cmd.id = getuploadparamsresponce.id
+ cmd.id = getuploadparamsresponse.id
self.apiclient.delete_volume(cmd)
@@ -1954,11 +1954,11 @@ class TestBrowseUploadVolume(cloudstackTestCase):
def getvolumelimts(self):
- totalresoucelist = Account.list(
+ totalresourcelist = Account.list(
self.apiclient,
id=self.account.id
)
- totalvolumes = totalresoucelist[0].volumetotal
+ totalvolumes = totalresourcelist[0].volumetotal
return (totalvolumes)
@@ -1969,9 +1969,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.domainid = self.domain.id
cmd.resourcetype = rtype
- responce = self.apiclient.updateResourceCount(cmd)
+ response = self.apiclient.updateResourceCount(cmd)
- totalstorage = responce[0].resourcecount
+ totalstorage = response[0].resourcecount
return (totalstorage)
@@ -2315,13 +2315,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
return
@attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
- def test_06_Browser_Upload_Volume_with_extended_file_extenstions(self):
+ def test_06_Browser_Upload_Volume_with_extended_file_extensions(self):
"""
- Test Browser_Upload_Volume_with_extended_file_extenstions
+ Test Browser_Upload_Volume_with_extended_file_extensions
"""
try:
- self.debug("========================= Test 35 Upload volume with extended file extenstions=========================")
+ self.debug("========================= Test 35 Upload volume with extended file extensions=========================")
if self.uploadvolumeformat == "OVA":
raise unittest.SkipTest("This test is need not be executed on VMWARE")
self.uploadwithextendedfileextentions()
@@ -2484,7 +2484,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
afteruploadvolumelimit = self.getvolumelimts()
if int(afteruploadvolumelimit) != (int(initialvolumelimit) + 1):
- self.fail("Volume Resouce Count is not updated")
+ self.fail("Volume Resource Count is not updated")
cmd = deleteVolume.deleteVolumeCmd()
cmd.id = deleted_browse_up_vol1.id
@@ -2512,7 +2512,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
afteruploadsecondarystoragelimit = self.getstoragelimts(11)
if afteruploadsecondarystoragelimit != (initialsecondarystoragelimit + volumedetails[0].size):
- self.fail("Secondary Storage Resouce Count is not updated")
+ self.fail("Secondary Storage Resource Count is not updated")
cmd = deleteVolume.deleteVolumeCmd()
cmd.id = deleted_browse_up_vol1.id
@@ -2592,7 +2592,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
afteruploadsecondarystoragelimit = self.getstoragelimts(11)
if afteruploadsecondarystoragelimit != (initialuploadsecondarystoragelimit - volumedetails[0].size):
- self.fail("Secondary Storage Resouce Count is not updated after deletion")
+ self.fail("Secondary Storage Resource Count is not updated after deletion")
except Exception as e:
self.fail("Exception occurred : %s" % e)
diff --git a/test/integration/component/test_deploy_vgpu_vm.py b/test/integration/component/test_deploy_vgpu_vm.py
index fb99e877a51..b17adefc22f 100644
--- a/test/integration/component/test_deploy_vgpu_vm.py
+++ b/test/integration/component/test_deploy_vgpu_vm.py
@@ -509,7 +509,7 @@ class TestvGPUWindowsVm(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.assertEqual(
diff --git a/test/integration/component/test_dynamic_compute_offering.py b/test/integration/component/test_dynamic_compute_offering.py
index 45ebb401e1d..ed754214739 100644
--- a/test/integration/component/test_dynamic_compute_offering.py
+++ b/test/integration/component/test_dynamic_compute_offering.py
@@ -185,7 +185,7 @@ class TestDynamicServiceOffering(cloudstackTestCase):
self.services["service_offering"])
self.cleanup_co.append(serviceOffering)
self.fail(
- "Compute Offering creation succeded, it should have failed")
+ "Compute Offering creation succeeded, it should have failed")
except Exception:
self.debug("Compute Offering Creation failed as expected")
return
@@ -207,7 +207,7 @@ class TestDynamicServiceOffering(cloudstackTestCase):
self.services["service_offering"])
self.cleanup_co.append(serviceOffering)
self.fail(
- "Compute Offering creation succeded, it should have failed")
+ "Compute Offering creation succeeded, it should have failed")
except Exception:
self.debug("Compute Offering Creation failed as expected")
return
@@ -229,7 +229,7 @@ class TestDynamicServiceOffering(cloudstackTestCase):
self.services["service_offering"])
self.cleanup_co.append(serviceOffering)
self.fail(
- "Compute Offering creation succeded, it should have failed")
+ "Compute Offering creation succeeded, it should have failed")
except Exception:
self.debug("Compute Offering Creation failed as expected")
return
@@ -360,7 +360,7 @@ class TestDynamicServiceOffering(cloudstackTestCase):
serviceofferingid=serviceOffering.id,
accountid=self.account.name,
domainid=self.account.domainid)
- self.fail("VM creation succeded, it should have failed")
+ self.fail("VM creation succeeded, it should have failed")
except Exception as e:
self.debug("vm creation failed as expected with error: %s" % e)
@@ -389,7 +389,7 @@ class TestDynamicServiceOffering(cloudstackTestCase):
customcpunumber=2,
accountid=self.account.name,
domainid=self.account.domainid)
- self.fail("VM deployment should have failed, it succeded")
+ self.fail("VM deployment should have failed, it succeeded")
except Exception as e:
self.debug("vm creation failed as expected: %s" % e)
return
@@ -1058,7 +1058,7 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
customcpunumber=4)
self.fail(
"Changing service offering with incomplete data should\
- have failed, it succeded")
+ have failed, it succeeded")
except Exception as e:
self.debug(
"Failure while changing service offering as expected: %s" %
@@ -1242,7 +1242,7 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
serviceOfferingId=serviceOffering_dynamic_1.id,
customcpunumber=4)
self.fail(
- "Changing service offering should have failed, it succeded")
+ "Changing service offering should have failed, it succeeded")
except Exception as e:
self.debug("Failure while changing service offering: %s" % e)
@@ -1443,7 +1443,7 @@ class TestAccountLimits(cloudstackTestCase):
customcpuspeed=512,
custommemory=256)
self.fail("Scaling virtual machine with cpu number more than \
- allowed limit (of account) succeded, should have failed")
+ allowed limit (of account) succeeded, should have failed")
except Exception as e:
self.debug(
"Failure while changing service offering as expected: %s" %
@@ -1581,7 +1581,7 @@ class TestAccountLimits(cloudstackTestCase):
customcpuspeed=512,
custommemory=512)
self.fail("Scaling virtual machine with cpu number more than \
- allowed limit (of account) succeded, should have failed")
+ allowed limit (of account) succeeded, should have failed")
except Exception as e:
self.debug(
"Failure while changing service offering as expected: %s" %
@@ -1746,7 +1746,7 @@ class TestAffinityGroup(cloudstackTestCase):
custommemory=128,
affinitygroupnames=[
affinityGroup.name])
- self.fail("vm creation should have failed, it succeded")
+ self.fail("vm creation should have failed, it succeeded")
except Exception as e:
self.debug("vm creation failed as expected with error: %s" % e)
diff --git a/test/integration/component/test_haproxy.py b/test/integration/component/test_haproxy.py
index e17fbcf30f5..961864c6e62 100644
--- a/test/integration/component/test_haproxy.py
+++ b/test/integration/component/test_haproxy.py
@@ -289,7 +289,7 @@ class TestHAProxyStickyness(cloudstackTestCase):
domainid=self.account.domainid,
listall=True)
self.assertIsInstance(routers, list,
- "List routers should return a valid repsonse")
+ "List routers should return a valid response")
return routers[0]
def create_VPN(self, public_ip):
diff --git a/test/integration/component/test_multiple_ips_per_nic.py b/test/integration/component/test_multiple_ips_per_nic.py
index fc75b552999..85cbd438952 100644
--- a/test/integration/component/test_multiple_ips_per_nic.py
+++ b/test/integration/component/test_multiple_ips_per_nic.py
@@ -340,7 +340,7 @@ class TestBasicOperations(cloudstackTestCase):
self.apiclient, id=(
virtual_machine.nic[0].id + random_gen()))
self.fail(
- "Adding secondary IP with wrong NIC id succeded, it shoud have failed")
+ "Adding secondary IP with wrong NIC id succeeded, it shoud have failed")
except Exception as e:
self.debug("Failed while adding secondary IP to wrong NIC")
@@ -350,7 +350,7 @@ class TestBasicOperations(cloudstackTestCase):
id=virtual_machine.nic[0].id,
ipaddress="255.255.255.300")
self.fail(
- "Adding secondary IP with wrong ipaddress succeded, it should have failed")
+ "Adding secondary IP with wrong ipaddress succeeded, it should have failed")
except Exception as e:
self.debug(
"Failed while adding wrong secondary IP to NIC of VM %s: %s" %
diff --git a/test/integration/component/test_organization_states.py b/test/integration/component/test_organization_states.py
index a84f59d9f7c..0d9afd3144a 100644
--- a/test/integration/component/test_organization_states.py
+++ b/test/integration/component/test_organization_states.py
@@ -240,7 +240,7 @@ class TestOrganizationStates(cloudstackTestCase):
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_16_enableZone(self):
"""
- Enable Zone that is diabled
+ Enable Zone that is disabled
Validate that listZones() returns the allocationstate as "Enabled"
"""
self.debug("Zone to be enabled: " + self.zone.id)
@@ -354,7 +354,7 @@ class TestOrganizationStates(cloudstackTestCase):
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_26_enablePod(self):
"""
- Enable Pod that is diabled
+ Enable Pod that is disabled
Validate that listPods() returns the allocationstate as "Enabled"
"""
self.debug("Pod to be enabled: " + self.zone.id)
@@ -470,7 +470,7 @@ class TestOrganizationStates(cloudstackTestCase):
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_36_enableCluster(self):
"""
- Enable Cluster that is diabled
+ Enable Cluster that is disabled
Validate that listClusters() returns the allocationstate as "Enabled"
"""
self.debug("Cluster to be enabled: " + self.cluster.id)
@@ -612,7 +612,7 @@ class TestOrganizationStates(cloudstackTestCase):
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_47_enableHost(self):
"""
- Enable Host that is diabled
+ Enable Host that is disabled
Validate that listHosts() returns the allocationstate as "Enabled"
"""
self.debug("Host to be enabled: " + self.host.id)
diff --git a/test/integration/component/test_stopped_vm.py b/test/integration/component/test_stopped_vm.py
index 2b3a8a6227d..b7fe4b04b04 100644
--- a/test/integration/component/test_stopped_vm.py
+++ b/test/integration/component/test_stopped_vm.py
@@ -1701,7 +1701,7 @@ class TestDeployOnSpecificHost(cloudstackTestCase):
except Exception as e:
self.fail("Deploy VM failed with exception: %s" % e)
- self.debug("Cheking the state of deployed VM")
+ self.debug("Checking the state of deployed VM")
vms = VirtualMachine.list(
self.apiclient,
id=vm.id,
diff --git a/test/integration/component/test_template_from_snapshot_with_template_details.py b/test/integration/component/test_template_from_snapshot_with_template_details.py
index 6b03860319a..ce7ae006ec8 100644
--- a/test/integration/component/test_template_from_snapshot_with_template_details.py
+++ b/test/integration/component/test_template_from_snapshot_with_template_details.py
@@ -334,7 +334,7 @@ class TestCreateTemplate(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
template = Template.create_from_snapshot(
diff --git a/test/integration/component/test_vpc_offerings.py b/test/integration/component/test_vpc_offerings.py
index c5f594b34d9..064c68c9878 100644
--- a/test/integration/component/test_vpc_offerings.py
+++ b/test/integration/component/test_vpc_offerings.py
@@ -960,7 +960,7 @@ class TestVPCOffering(cloudstackTestCase):
except Exception as e:
self.fail("Failed to update VPC offering- %s" % e)
- self.logger.debug("Cheking if the changes are reflected to listVPC call?")
+ self.logger.debug("Checking if the changes are reflected to listVPC call?")
vpc_offs = vpc_off.list(
self.apiclient,
id=vpc_off.id,
@@ -1045,7 +1045,7 @@ class TestVPCOffering(cloudstackTestCase):
self.logger.debug("Deleting the VPC offering: %s" % vpc_off_4.name)
vpc_off_4.delete(self.apiclient)
- self.logger.debug("Cheking if listVPCOff return the deleted VPC off")
+ self.logger.debug("Checking if listVPCOff return the deleted VPC off")
vpc_offs = VpcOffering.list(
self.apiclient,
id=vpc_off_4.id,
@@ -1057,7 +1057,7 @@ class TestVPCOffering(cloudstackTestCase):
"List VPC offerings should nt return any response for deleted offering"
)
- self.logger.debug("Validating the listVPCOfferings repsonse by ids")
+ self.logger.debug("Validating the listVPCOfferings response by ids")
self.validate_vpc_offering(vpc_off_3)
self.logger.debug("ListVPCOfferings by displaytext & verifying the response")
diff --git a/test/integration/smoke/test_accounts.py b/test/integration/smoke/test_accounts.py
index 1cb590b7f6b..7daa8f8ffd4 100644
--- a/test/integration/smoke/test_accounts.py
+++ b/test/integration/smoke/test_accounts.py
@@ -1420,16 +1420,16 @@ class TestUserLogin(cloudstackTestCase):
self.cleanup.append(self.account)
self.debug("Logging into the cloudstack with login API")
- respose = User.login(
+ response = User.login(
self.apiclient,
username=self.account.name,
password=self.services["account"]["password"]
)
- self.debug("Login API response: %s" % respose)
+ self.debug("Login API response: %s" % response)
self.assertNotEqual(
- respose.sessionkey,
+ response.sessionkey,
None,
"Login to the CloudStack should be successful" +
"response shall have non Null key"
@@ -1574,15 +1574,15 @@ class TestUserLogin(cloudstackTestCase):
)
self.debug("Logging into the cloudstack with login API")
- respose = User.login(
+ response = User.login(
self.apiclient,
username=self.account.name,
password=self.services["account"]["password"],
domainid=domain.id)
- self.debug("Login API response: %s" % respose)
+ self.debug("Login API response: %s" % response)
self.assertNotEqual(
- respose.sessionkey,
+ response.sessionkey,
None,
"Login to the CloudStack should be successful" +
"response shall have non Null key"
diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py
index 0581a8b67c9..012ec277613 100644
--- a/test/integration/smoke/test_vm_life_cycle.py
+++ b/test/integration/smoke/test_vm_life_cycle.py
@@ -406,7 +406,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.assertEqual(
@@ -440,7 +440,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.debug(
@@ -550,7 +550,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.assertEqual(
@@ -590,7 +590,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.assertEqual(
diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py
index b8590bc77a0..07779e78c58 100644
--- a/test/integration/smoke/test_vm_snapshots.py
+++ b/test/integration/smoke/test_vm_snapshots.py
@@ -513,7 +513,7 @@ class TestChangeServiceOfferingForVmWithSnapshots(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.assertEqual(
list_vm_response[0].state,
diff --git a/test/integration/testpaths/testpath_stopped_vm.py b/test/integration/testpaths/testpath_stopped_vm.py
index da37567ff14..f495317ed03 100644
--- a/test/integration/testpaths/testpath_stopped_vm.py
+++ b/test/integration/testpaths/testpath_stopped_vm.py
@@ -201,13 +201,13 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
# Set Zones and disk offerings
# Check that we are able to login to the created account
- respose = User.login(
+ response = User.login(
cls.apiclient,
username=cls.account.name,
password=cls.testdata["account"]["password"]
)
- assert respose.sessionkey is not None,\
+ assert response.sessionkey is not None,\
"Login to the CloudStack should be successful\
response shall have non Null key"
diff --git a/test/integration/testpaths/testpath_usage.py b/test/integration/testpaths/testpath_usage.py
index 9d04ffaf09f..3774bf8471c 100644
--- a/test/integration/testpaths/testpath_usage.py
+++ b/test/integration/testpaths/testpath_usage.py
@@ -1961,11 +1961,11 @@ class TestUsage(cloudstackTestCase):
# 3. Create LB rule for the IP address and verify LB rule usage
is generated for the account
# 4. Create another LB rule with different ports and verify
- seperate usage is generated for new LB rule
+ separate usage is generated for new LB rule
# 5. Create egress firewall rule for VM and SSH to VM
# 6. Ping external network from the VM and verify that
- network byte usage is genrated correctly
+ network byte usage is generated correctly
# 7. Delete one LB rule and verify that the usage
is stopped for the LB rule
# 8. Stop the network router and
@@ -2394,19 +2394,19 @@ class TestUsage(cloudstackTestCase):
"""
Steps:
# 1. Add VM in VPC network, verify that
- # usage is genrated for source nat ip pf network in vpc
+ # usage is generated for source nat ip of network in vpc
# 2. Acquire a public ip in VPC network and verify
- usage is genrated for the public ip
+ usage is generated for the public ip
# 3. Create multiple PF rule on this ip in VPC network,
and verify that usage is generated for both pf rules
# 4. Enable vpn on source nat ip in vpc network
# 5. Add 2 vpn user
- And verify that usage is genrated for both the vpn users
+ And verify that usage is generated for both the vpn users
# 6. Delete one VPn user, and verify that usage is stopped
for deleted user
# 7. Open Egress rules on this VPC network
# 8. Create network traffic on this network ping www.google.com,
- and verify that usage is genrated for network traffic
+ and verify that usage is generated for network traffic
# 9. Delete onePF rule in VPC network
And verify that usage is stopped for the pf rule
# 10. Stop router for VPC network
diff --git a/test/integration/testpaths/testpath_vmlc.py b/test/integration/testpaths/testpath_vmlc.py
index f6376277552..0ae70c8e4bd 100755
--- a/test/integration/testpaths/testpath_vmlc.py
+++ b/test/integration/testpaths/testpath_vmlc.py
@@ -276,13 +276,13 @@ class TestPathVMLC(cloudstackTestCase):
cls.template.download(cls.apiclient)
# Check that we are able to login to the created account
- respose = User.login(
+ response = User.login(
cls.apiclient,
username=cls.account.name,
password=cls.testdata["account"]["password"]
)
- assert respose.sessionkey is not None,\
+ assert response.sessionkey is not None,\
"Login to the CloudStack should be successful\
response shall have non Null key"
@@ -339,7 +339,7 @@ class TestPathVMLC(cloudstackTestCase):
# 14. Verify VM accessibility on new host
"""
if self.hypervisor.lower() in ['hyperv', 'lxc'] and value == VPC_NETWORK:
- self.skipTest("cann't be run for {} hypervisor".format(self.hypervisor))
+ self.skipTest("can't be run for {} hypervisor".format(self.hypervisor))
# List created service offering in setUpClass by name
listServiceOfferings = ServiceOffering.list(
@@ -740,7 +740,7 @@ class TestPathVMLC(cloudstackTestCase):
# 5. Try to reboot the VM in destroyed state, operation should fail
"""
if self.hypervisor.lower() in ['hyperv', 'lxc'] and value == VPC_NETWORK:
- self.skipTest("cann't be run for {} hypervisor".format(self.hypervisor))
+ self.skipTest("can't be run for {} hypervisor".format(self.hypervisor))
network = CreateNetwork(self, value)
networkid = network.id
@@ -839,7 +839,7 @@ class TestPathVMLC(cloudstackTestCase):
"""
if self.hypervisor.lower() in ['hyperv', 'lxc'] and value == VPC_NETWORK:
- self.skipTest("cann't be run for {} hypervisor".format(self.hypervisor))
+ self.skipTest("can't be run for {} hypervisor".format(self.hypervisor))
network = CreateNetwork(self, value)
networkid = network.id
diff --git a/tools/marvin/marvin/sandbox/demo/simulator/testcase/test_vm_life_cycle.py b/tools/marvin/marvin/sandbox/demo/simulator/testcase/test_vm_life_cycle.py
index 28c4c3513a8..b94174ec500 100644
--- a/tools/marvin/marvin/sandbox/demo/simulator/testcase/test_vm_life_cycle.py
+++ b/tools/marvin/marvin/sandbox/demo/simulator/testcase/test_vm_life_cycle.py
@@ -358,7 +358,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.debug(
@@ -474,7 +474,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.assertEqual(
@@ -512,7 +512,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
- "Check VM avaliable in List Virtual Machines"
+ "Check VM available in List Virtual Machines"
)
self.assertEqual(
diff --git a/tools/ngui/static/js/lib/jquery-1.7.2.js b/tools/ngui/static/js/lib/jquery-1.7.2.js
index 877f0b2e9e1..0226b700644 100644
--- a/tools/ngui/static/js/lib/jquery-1.7.2.js
+++ b/tools/ngui/static/js/lib/jquery-1.7.2.js
@@ -2338,7 +2338,7 @@ jQuery.fn.extend({
classNames = value.split( rspace );
while ( (className = classNames[ i++ ]) ) {
- // check each className given, space seperated list
+ // check each className given, space separated list
state = isBool ? state : !self.hasClass( className );
self[ state ? "addClass" : "removeClass" ]( className );
}
diff --git a/ui/docs/development.md b/ui/docs/development.md
index 1693307238d..3ace7a60d97 100644
--- a/ui/docs/development.md
+++ b/ui/docs/development.md
@@ -177,7 +177,7 @@ The columns that should be made available while displaying the list of
or a function in case we need to selectively (i.e., based on certain
conditions) restrict the view of certain columns.
-It also contains router-links to the resouce and other related data such as the
+It also contains router-links to the resource and other related data such as the
account, domain, etc of the resource if present
For example:
diff --git a/ui/public/locales/ja_JP.json b/ui/public/locales/ja_JP.json
index 91f7eedd696..f376b80b3ba 100644
--- a/ui/public/locales/ja_JP.json
+++ b/ui/public/locales/ja_JP.json
@@ -2909,7 +2909,7 @@
"message.enabling.vpc.offering": "VPCオファリングを有効にしています",
"message.enabling.zone": "ゾーンを有効にしています",
"message.enabling.zone.dots": "ゾーンを有効にしています...",
- "message.enter.seperated.list.multiple.cidrs": "CIDRが複数ある場合は、コンマ区切りの一覧を入力してください",
+ "message.enter.separated.list.multiple.cidrs": "CIDRが複数ある場合は、コンマ区切りの一覧を入力してください",
"message.enter.token": "メールの招待状に記載されているトークンを入力してください。",
"message.enter.valid.nic.ip": "NICの有効なIPアドレスを入力してください",
"message.error.access.key": "アクセスキーを入力してください",
diff --git a/ui/src/views/compute/DeployVM.vue b/ui/src/views/compute/DeployVM.vue
index e604a438b2e..01f38f8cc32 100644
--- a/ui/src/views/compute/DeployVM.vue
+++ b/ui/src/views/compute/DeployVM.vue
@@ -468,7 +468,7 @@
diff --git a/ui/src/views/network/LoadBalancing.vue b/ui/src/views/network/LoadBalancing.vue
index 9220f22bd2e..12246d8a039 100644
--- a/ui/src/views/network/LoadBalancing.vue
+++ b/ui/src/views/network/LoadBalancing.vue
@@ -1147,7 +1147,7 @@ export default {
errorMessage: this.$t('message.remove.rule.failed'),
errorMethod: () => {
if (this.selectedItems.length > 0) {
- eventBus.emit('update-resource-state', { selectedItems: this.selectedItems, resouce: rule.id, state: 'failed' })
+ eventBus.emit('update-resource-state', { selectedItems: this.selectedItems, resource: rule.id, state: 'failed' })
}
if (this.selectedRowKeys.length === 0) {
this.parentToggleLoading()
diff --git a/ui/src/views/plugins/quota/QuotaSummary.vue b/ui/src/views/plugins/quota/QuotaSummary.vue
index 17335b6face..5bfa72a8aba 100644
--- a/ui/src/views/plugins/quota/QuotaSummary.vue
+++ b/ui/src/views/plugins/quota/QuotaSummary.vue
@@ -20,7 +20,7 @@
@@ -47,7 +47,7 @@ export default {
}
},
computed: {
- isSummaryResouce () {
+ isSummaryResource () {
if (this.$route.path.startsWith('/quotasummary')) {
if (this.$route.query && 'quota' in this.$route.query && this.$route.query.quota) {
return true
diff --git a/utils/src/main/java/com/cloud/utils/cisco/n1kv/vsm/VsmResponse.java b/utils/src/main/java/com/cloud/utils/cisco/n1kv/vsm/VsmResponse.java
index eb7f0246575..c7ceffcd3d8 100644
--- a/utils/src/main/java/com/cloud/utils/cisco/n1kv/vsm/VsmResponse.java
+++ b/utils/src/main/java/com/cloud/utils/cisco/n1kv/vsm/VsmResponse.java
@@ -217,7 +217,7 @@ public abstract class VsmResponse {
LSSerializer lss = ls.createLSSerializer();
System.out.println(lss.writeToString(_docResponse));
} catch (ParserConfigurationException e) {
- s_logger.error("Error parsing the repsonse : " + e.toString());
+ s_logger.error("Error parsing the response : " + e.toString());
}
}
}