Fix spelling (#6272)
This commit is contained in:
parent 8a229baac5
commit 7d23a0a759
@@ -457,7 +457,7 @@ public class Agent implements HandlerFactory, IAgentControl {
                 try {
                     link.send(request.toBytes());
                 } catch (final ClosedChannelException e) {
-                    s_logger.warn("Unable to send reques: " + request.toString());
+                    s_logger.warn("Unable to send request: " + request.toString());
                 }
             }
         }

@@ -467,7 +467,7 @@ public class Agent implements HandlerFactory, IAgentControl {
         try {
             addr = InetAddress.getLocalHost();
         } catch (final UnknownHostException e) {
-            s_logger.warn("unknow host? ", e);
+            s_logger.warn("unknown host? ", e);
             throw new CloudRuntimeException("Cannot get local IP address");
         }

@@ -954,7 +954,7 @@ public class Agent implements HandlerFactory, IAgentControl {
             try {
                 _link.send(request.toBytes());
             } catch (final ClosedChannelException e) {
-                s_logger.warn("Unable to post agent control reques: " + request.toString());
+                s_logger.warn("Unable to post agent control request: " + request.toString());
                 throw new AgentControlChannelException("Unable to post agent control request due to " + e.getMessage());
             }
         } else {
@@ -362,7 +362,7 @@ public class AgentShell implements IAgentShell, Daemon {

             s_logger.info("Agent started");
         } else {
-            s_logger.error("Could not start the Agent because the absolut path of the \"log4j-cloud.xml\" file cannot be determined.");
+            s_logger.error("Could not start the Agent because the absolute path of the \"log4j-cloud.xml\" file cannot be determined.");
         }

         final Class<?> c = this.getClass();
@@ -521,7 +521,7 @@ public class OVFHelper {
     public List<OVFNetworkTO> getNetPrerequisitesFromDocument(Document doc) throws InternalErrorException {
         if (doc == null) {
             if (s_logger.isTraceEnabled()) {
-                s_logger.trace("no document to parse; returning no prerequiste networks");
+                s_logger.trace("no document to parse; returning no prerequisite networks");
             }
             return Collections.emptyList();
         }
@@ -65,7 +65,7 @@ public interface LoadBalancingRulesService {
     * @param cmd
     *            the command specifying the stickiness method name, params (name,value pairs), policy name and
     *            description.
-    * @return the newly created stickiness policy if successfull, null otherwise
+    * @return the newly created stickiness policy if successful, null otherwise
     * @thows NetworkRuleConflictException
     */
    public StickinessPolicy createLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) throws NetworkRuleConflictException;

@@ -81,7 +81,7 @@ public interface LoadBalancingRulesService {
     * @param cmd
     *            the command specifying the stickiness method name, params
     *            (name,value pairs), policy name and description.
-    * @return the newly created stickiness policy if successfull, null
+    * @return the newly created stickiness policy if successful, null
     *         otherwise
     * @thows NetworkRuleConflictException
     */
@@ -56,7 +56,7 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba
         NotUploaded("The volume entry is just created in DB, not yet uploaded"),
         UploadInProgress("Volume upload is in progress"),
         UploadError("Volume upload encountered some error"),
-        UploadAbandoned("Volume upload is abandoned since the upload was never initiated within a specificed time"),
+        UploadAbandoned("Volume upload is abandoned since the upload was never initiated within a specified time"),
         Attaching("The volume is attaching to a VM from Ready state.");

         String _description;
@@ -502,7 +502,7 @@ public interface UserVmService {
    String getVmUserData(long vmId);

    /**
-     * determin whether the uservm should be visible to the end user
+     * determine whether the uservm should be visible to the end user
     * @return value of the display flag
     */
    public boolean isDisplayResourceEnabled(Long vmId);
@@ -69,7 +69,7 @@ public class ReleasePodIpCmdByAdmin extends BaseCmd {
         boolean result = _networkService.releasePodIp(this);
         if (result) {
             SuccessResponse response = new SuccessResponse(getCommandName());
-            response.setDisplayText("IP is released sucessfully");
+            response.setDisplayText("IP is released successfully");
             setResponseObject(response);
         } else {
             throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release Pod ip ");
@@ -64,7 +64,7 @@ public class CreateStorageNetworkIpRangeCmd extends BaseAsyncCmd {

    @Parameter(name = ApiConstants.VLAN,
               type = CommandType.INTEGER,
-               description = "Optional. The vlan the ip range sits on, default to Null when it is not specificed which means you network is not on any Vlan. This is mainly for Vmware as other hypervisors can directly reterive bridge from pyhsical network traffic type table")
+               description = "Optional. The vlan the ip range sits on, default to Null when it is not specified which means you network is not on any Vlan. This is mainly for Vmware as other hypervisors can directly reterive bridge from pyhsical network traffic type table")
    private Integer vlan;

    @Parameter(name = ApiConstants.NETMASK, type = CommandType.STRING, required = true, description = "the netmask for storage network")
@@ -126,7 +126,7 @@ public class AddUserToProjectCmd extends BaseAsyncCmd {

    private void validateInput() {
        if (email == null && username == null) {
-            throw new InvalidParameterValueException("Must specify atleast username");
+            throw new InvalidParameterValueException("Must specify at least username");
        }
        if (email != null && username == null) {
            throw new InvalidParameterValueException("Must specify username for given email ID");
@@ -37,7 +37,7 @@ public class QueryAsyncJobResultCmd extends BaseCmd {
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

-    @Parameter(name = ApiConstants.JOB_ID, type = CommandType.UUID, entityType = AsyncJobResponse.class, required = true, description = "the ID of the asychronous job")
+    @Parameter(name = ApiConstants.JOB_ID, type = CommandType.UUID, entityType = AsyncJobResponse.class, required = true, description = "the ID of the asynchronous job")
    private Long id;

    /////////////////////////////////////////////////////
@@ -67,7 +67,7 @@ public class RemoveFromGlobalLoadBalancerRuleCmd extends BaseAsyncCmd {
               collectionType = CommandType.UUID,
               entityType = FirewallRuleResponse.class,
               required = true,
-               description = "the list load balancer rules that will be assigned to gloabal load balancer rule")
+               description = "the list load balancer rules that will be assigned to global load balancer rule")
    private List<Long> loadBalancerRulesIds;

    /////////////////////////////////////////////////////
@@ -94,7 +94,7 @@ public class DeleteSecurityGroupCmd extends BaseCmd {
        }

        if (id == null) {
-            throw new InvalidParameterValueException("Either id or name parameter is requred by deleteSecurityGroup command");
+            throw new InvalidParameterValueException("Either id or name parameter is required by deleteSecurityGroup command");
        }

        return id;
@@ -67,7 +67,7 @@ public class RestoreVMCmd extends BaseAsyncCmd implements UserCmd {

    @Override
    public String getEventDescription() {
-        return "Restore a VM to orignal template or specific snapshot";
+        return "Restore a VM to original template or specific snapshot";
    }

    @Override
@@ -55,7 +55,7 @@ public class LBStickinessPolicyResponse extends BaseResponse {
    @Param(description = "is policy for display to the regular user", since = "4.4", authorized = {RoleType.Admin})
    private Boolean forDisplay;

-    // FIXME : if prams with the same name exists more then once then value are concatinated with ":" as delimitor .
+    // FIXME : if prams with the same name exists more then once then value are concatinated with ":" as delimiter .
    // Reason: Map does not support duplicate keys, need to look for the alernate data structure
    // Example: <params>{indirect=null, name=testcookie, nocache=null, domain=www.yahoo.com:www.google.com, postonly=null}</params>
    // in the above there are two domains with values www.yahoo.com and www.google.com
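The FIXME in the hunk above explains the underlying problem: a plain `Map` cannot hold duplicate parameter names, so repeated values end up concatenated with `":"` as the delimiter. Purely as an illustration (not part of this commit), a minimal Java sketch of the "alternate data structure" the comment hints at, a map of lists; the class and method names here are invented:

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical alternate data structure for the FIXME above: a map of lists
// keeps every value of a repeated parameter instead of joining them with ":".
public class StickinessParamsDemo {
    private final Map<String, List<String>> params = new LinkedHashMap<>();

    void addParam(String name, String value) {
        params.computeIfAbsent(name, k -> new ArrayList<>()).add(value);
    }

    List<String> getParam(String name) {
        return params.getOrDefault(name, List.of());
    }

    public static void main(String[] args) {
        StickinessParamsDemo p = new StickinessParamsDemo();
        p.addParam("domain", "www.yahoo.com");
        p.addParam("domain", "www.google.com");
        // Prints [www.yahoo.com, www.google.com]: both values survive intact.
        System.out.println(p.getParam("domain"));
    }
}
```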
@@ -55,7 +55,7 @@ public class SetFirewallRulesCommand extends NetworkElementCommand {
         */
        if (fwTO.revoked()) {
            StringBuilder sb = new StringBuilder();
-            /* This entry is added just to make sure atleast there will one entry in the list to get the ipaddress */
+            /* This entry is added just to make sure at least there will one entry in the list to get the ipaddress */
            sb.append(fwTO.getSrcIp()).append(":reverted:0:0:0:0:").append(fwTO.getId()).append(":");
            String fwRuleEntry = sb.toString();
            toAdd.add(fwRuleEntry);
@@ -60,7 +60,7 @@ public class SetNetworkACLCommand extends NetworkElementCommand {
         */
        if (aclTO.revoked() == true) {
            final StringBuilder sb = new StringBuilder();
-            /* This entry is added just to make sure atleast there will one entry in the list to get the ipaddress */
+            /* This entry is added just to make sure at least there will one entry in the list to get the ipaddress */
            List<String> revertRuleItems = Arrays.asList("", "reverted", "0", "0", "0", "");
            sb.append(aclTO.getTrafficType().toString()).append(String.join(RULE_DETAIL_SEPARATOR, revertRuleItems));
            final String aclRuleEntry = sb.toString();
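Both of the hunks above rely on the trick the corrected comment describes: when a rule is revoked, a placeholder entry is still appended so the list is never empty and downstream code can always recover the source IP from the first entry. As an illustration only, a tiny self-contained Java sketch of that sentinel pattern; the entry format and `buildEntries` name are invented, not CloudStack APIs:

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative sentinel pattern: when every rule is revoked, one placeholder
// entry still carries the source IP so downstream parsing never sees an
// empty list.
public class SentinelEntryDemo {
    static List<String> buildEntries(String srcIp, List<Long> activeRuleIds) {
        List<String> entries = new ArrayList<>();
        if (activeRuleIds.isEmpty()) {
            entries.add(srcIp + ":reverted:0:0:0:0"); // sentinel with the IP
        }
        for (Long id : activeRuleIds) {
            entries.add(srcIp + ":active:" + id);
        }
        return entries;
    }

    public static void main(String[] args) {
        // Even with no active rules, the consumer can read the IP at index 0.
        System.out.println(buildEntries("10.1.1.1", List.of()));
    }
}
```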
@@ -719,7 +719,7 @@ public class Upgrade410to420 implements DbUpgrade {
                dcList = new ArrayList<String>();
                count = 0L;
                // Legacy zone term is meant only for VMware
-                // Legacy zone is a zone with atleast 2 clusters & with multiple DCs or VCs
+                // Legacy zone is a zone with at least 2 clusters & with multiple DCs or VCs
                clusters = clustersQuery.executeQuery();
                if (!clusters.next()) {
                    continue; // Ignore the zone without any clusters
@@ -213,25 +213,25 @@ class OvmStoragePool(OvmObject):
 def prepareOCFS2Nodes(clusterName, nodeString):
     def configureEtcHosts(nodes):
         if not exists(ETC_HOSTS):
-            orignalConf = ""
+            originalConf = ""
         else:
             fd = open(ETC_HOSTS, "r")
-            orignalConf = fd.read()
+            originalConf = fd.read()
             fd.close()

         pattern = r"(.*%s.*)|(.*%s.*)"
         newlines = []
         for n in nodes:
             p = pattern % (n["ip_address"], n["name"])
-            orignalConf = re.sub(p, "", orignalConf)
+            originalConf = re.sub(p, "", originalConf)
             newlines.append("%s\t%s\n"%(n["ip_address"], n["name"]))

-        orignalConf = orignalConf + "".join(newlines)
+        originalConf = originalConf + "".join(newlines)
         # remove extra empty lines
-        orignalConf = re.sub(r"\n\s*\n*", "\n", orignalConf)
-        logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure /etc/hosts:%s\n"%orignalConf)
+        originalConf = re.sub(r"\n\s*\n*", "\n", originalConf)
+        logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure /etc/hosts:%s\n"%originalConf)
         fd = open(ETC_HOSTS, "w")
-        fd.write(orignalConf)
+        fd.write(originalConf)
         fd.close()

     def configureHostName(nodes):
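Beyond the variable rename, the hunk above shows an idempotent rewrite of /etc/hosts: stale lines matching each node's IP or name are stripped with a regex, fresh "ip name" lines are appended, and leftover blank lines are collapsed. As a sketch only (not part of the commit), the same pattern in Java; the file path and node list are made-up demo values:

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Pattern;

// Illustrative only: idempotently refresh host entries in a hosts-style file,
// mirroring the Python pattern above. The file path and nodes are made up.
public class HostsRewrite {
    public static void main(String[] args) throws Exception {
        Path hosts = Path.of("hosts.txt");
        Map<String, String> nodes = new LinkedHashMap<>();
        nodes.put("10.0.0.1", "node1");
        nodes.put("10.0.0.2", "node2");

        String conf = Files.exists(hosts) ? Files.readString(hosts) : "";
        StringBuilder fresh = new StringBuilder();
        for (Map.Entry<String, String> n : nodes.entrySet()) {
            // Drop any stale line mentioning this node's IP or name ...
            conf = conf.replaceAll("(?m)^.*(" + Pattern.quote(n.getKey())
                    + "|" + Pattern.quote(n.getValue()) + ").*$", "");
            // ... and queue a fresh "ip<TAB>name" line to append.
            fresh.append(n.getKey()).append('\t').append(n.getValue()).append('\n');
        }
        // Collapse the blank lines left behind, then append the fresh entries.
        conf = conf.replaceAll("\n\\s*\n*", "\n") + fresh;
        Files.writeString(hosts, conf);
    }
}
```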
@@ -223,7 +223,7 @@ public class Ovm3VmSupport {
            xen.migrateVm(ovmObject.deDash(vm.getVmRootDiskPoolId()),
                    vm.getVmUuid(), destIp);
            state = State.Stopping;
-            msg = "Migration of " + vmName + " successfull";
+            msg = "Migration of " + vmName + " successful";
            return new MigrateAnswer(cmd, true, msg, null);
        } catch (Ovm3ResourceException e) {
            msg = "Pooled VM Migrate" + ": Migration of " + vmName + " to "
@@ -669,7 +669,7 @@ StaticNatServiceProvider, IpDeployer {
        boolean matchedEndChar = false;
        if (str.length() < 2)
        {
-            return false; // atleast one numeric and one char. example:
+            return false; // at least one numeric and one char. example:
        }
        // 3h
        final char strEnd = str.toCharArray()[str.length() - 1];
@@ -792,7 +792,7 @@ public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient {
        long timeElapsedInSecs = (System.currentTimeMillis() - migrationStartTime) / 1000;
        int timeRemainingInSecs = (int) (timeoutInSecs - timeElapsedInSecs);
        if (timeRemainingInSecs > (timeoutInSecs / 2)) {
-            // Try to pause gracefully (continue the migration) if atleast half of the time is remaining
+            // Try to pause gracefully (continue the migration) if at least half of the time is remaining
            pauseVolumeMigration(srcVolumeId, false);
            status = waitForVolumeMigrationToComplete(volume.getVtreeId(), timeRemainingInSecs);
        }
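The hunk above budgets a migration timeout: elapsed time is subtracted from the allowance, and the graceful path is only attempted while at least half the budget remains. A standalone sketch of that guard, with invented names (`totalBudgetSecs`, the printed branches) that are not CloudStack or ScaleIO APIs:

```java
// Illustrative timeout-budget guard, assuming a 600-second total allowance.
public class TimeoutBudget {
    public static void main(String[] args) {
        long startMillis = System.currentTimeMillis() - 120_000; // pretend 120s elapsed
        long totalBudgetSecs = 600;

        long elapsedSecs = (System.currentTimeMillis() - startMillis) / 1000;
        long remainingSecs = totalBudgetSecs - elapsedSecs;

        if (remainingSecs > totalBudgetSecs / 2) {
            // Enough budget left: attempt the graceful path with what remains.
            System.out.println("graceful attempt, waiting up to " + remainingSecs + "s");
        } else {
            // Less than half the budget left: skip straight to the forceful path.
            System.out.println("skipping graceful attempt");
        }
    }
}
```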
@@ -288,7 +288,7 @@ class LdapManagerImplSpec extends spock.lang.Specification {
        def ldapManager = new LdapManagerImpl(ldapConfigurationDao, ldapContextFactory, ldapUserManagerFactory, ldapConfiguration)
        when: "We search for users"
        def result = ldapManager.searchUsers("rmurphy");
-        then: "A list of atleast 1 is returned"
+        then: "A list of at least 1 is returned"
        result.size() > 0;
    }

@@ -390,7 +390,7 @@ class LdapManagerImplSpec extends spock.lang.Specification {
        def ldapManager = new LdapManagerImpl(ldapConfigurationDao, ldapContextFactory, ldapUserManagerFactory, ldapConfiguration)
        when: "A request for configurations is made"
        def result = ldapManager.listConfigurations(new LdapListConfigurationCmd())
-        then: "Then atleast 1 ldap configuration is returned"
+        then: "Then at least 1 ldap configuration is returned"
        result.second() > 0
    }
@@ -55,7 +55,7 @@ class LdapSearchUserCmdSpec extends spock.lang.Specification {
        def ldapUserSearchCmd = new LdapUserSearchCmd(ldapManager)
        when: "The command is executed"
        ldapUserSearchCmd.execute()
-        then: "A array with length of atleast 1 is returned"
+        then: "A array with length of at least 1 is returned"
        ldapUserSearchCmd.responseObject.getResponses().size() > 0
    }
@@ -1563,7 +1563,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
        List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();

-        // There should be atleast the ROOT volume of the VM in usable state
+        // There should be at least the ROOT volume of the VM in usable state
        if (volumesTobeCreated.isEmpty()) {
            // OfflineVmwareMigration: find out what is wrong with the id of the vm we try to start
            throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM: " + vmProfile.getId());
@@ -224,7 +224,7 @@ public class ActionEventUtils {
        eventDescription.put("status", state.toString());
        eventDescription.put("entity", resourceType);
        eventDescription.put("entityuuid", resourceUuid);
-        //Put all the first class entities that are touched during the action. For now atleast put in the vmid.
+        //Put all the first class entities that are touched during the action. For now at least put in the vmid.
        populateFirstClassEntities(eventDescription);
        eventDescription.put("description", description);
@@ -537,7 +537,7 @@ public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScale
        for (ConditionVO condition : conditions) {
            if (counterIds.contains(condition.getCounterid())) {
                throw new InvalidParameterValueException(
-                        "atleast two conditions in the conditionids have the same counter. It is not right to apply two different conditions for the same counter");
+                        "at least two conditions in the conditionids have the same counter. It is not right to apply two different conditions for the same counter");
            }
            counterIds.add(condition.getCounterid());
        }
@@ -503,7 +503,7 @@ public class RollingMaintenanceManagerImpl extends ManagerBase implements Rollin
    /**
     * Execute stage on host
     * @return tuple: (SUCCESS, DETAILS, AVOID_MAINTENANCE) where:
-     *         - SUCCESS: True if stage is successfull
+     *         - SUCCESS: True if stage is successful
     *         - DETAILS: Information retrieved by the host after executing the stage
     *         - AVOID_MAINTENANCE: True if maintenance stage must be avoided
     */

@@ -519,7 +519,7 @@ public class RollingMaintenanceManagerImpl extends ManagerBase implements Rollin
    /**
     * Send rolling maintenance command to a host to perform a certain stage specified in cmd
     * @return tuple: (SUCCESS, DETAILS, AVOID_MAINTENANCE) where:
-     *         - SUCCESS: True if stage is successfull
+     *         - SUCCESS: True if stage is successful
     *         - DETAILS: Information retrieved by the host after executing the stage
     *         - AVOID_MAINTENANCE: True if maintenance stage must be avoided
     */

@@ -617,7 +617,7 @@ public class RollingMaintenanceManagerImpl extends ManagerBase implements Rollin
        }
        List<String> hostTags = hostTagsDao.getHostTags(host.getId());

-        int sucessfullyCheckedVmMigrations = 0;
+        int successfullyCheckedVmMigrations = 0;
        for (VMInstanceVO runningVM : vmsRunning) {
            boolean canMigrateVm = false;
            ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(runningVM.getServiceOfferingId());

@@ -656,9 +656,9 @@ public class RollingMaintenanceManagerImpl extends ManagerBase implements Rollin
                s_logger.error(msg);
                return new Pair<>(false, msg);
            }
-            sucessfullyCheckedVmMigrations++;
+            successfullyCheckedVmMigrations++;
        }
-        if (sucessfullyCheckedVmMigrations != vmsRunning.size()) {
+        if (successfullyCheckedVmMigrations != vmsRunning.size()) {
            String migrationCheckDetails = String.format("%s cannot enter maintenance mode as capacity check failed for hosts in cluster %s", host, cluster);
            return new Pair<>(false, migrationCheckDetails);
        }
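The two javadoc hunks above describe a three-part return contract, (SUCCESS, DETAILS, AVOID_MAINTENANCE). A minimal self-contained Java sketch of such a tuple using a record; the `StageResult` name is invented and is not the holder class CloudStack actually uses:

```java
// Hypothetical holder for the (SUCCESS, DETAILS, AVOID_MAINTENANCE) contract
// described in the javadoc above; CloudStack's real holder class may differ.
public class StageResultDemo {
    record StageResult(boolean success, String details, boolean avoidMaintenance) {}

    static StageResult executeStage() {
        // Pretend the stage ran and reported some host-side output.
        return new StageResult(true, "pre-flight script ok", false);
    }

    public static void main(String[] args) {
        StageResult r = executeStage();
        if (r.success() && !r.avoidMaintenance()) {
            System.out.println("proceed to maintenance: " + r.details());
        }
    }
}
```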
@@ -409,7 +409,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR

            success = true;
        } catch (ResourceUnavailableException e) {
-            throw new CloudRuntimeException("Failed to update removed load balancer details from gloabal load balancer");
+            throw new CloudRuntimeException("Failed to update removed load balancer details from global load balancer");
        }

        return success;

@@ -479,7 +479,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR
                applyGlobalLoadBalancerRuleConfig(gslbRuleId, true);
            }
        } catch (ResourceUnavailableException e) {
-            throw new CloudRuntimeException("Failed to update the gloabal load balancer");
+            throw new CloudRuntimeException("Failed to update the global load balancer");
        }

        Transaction.execute(new TransactionCallbackNoReturn() {
@@ -250,7 +250,7 @@ public class NetworkACLManagerTest extends TestCase {

        Mockito.verify(aclItem, Mockito.times(4)).getState();

-        assertTrue("Operation should be successfull!", result);
+        assertTrue("Operation should be successful!", result);
    }

    @Configuration
@@ -229,7 +229,7 @@ public class Vnc33Authentication extends OneTimeSwitch {
            case RfbConstants.VNC_AUTH_OK: {
                // Nothing to do
                if (verbose)
-                    System.out.println("[" + this + "] INFO: Authentication successfull.");
+                    System.out.println("[" + this + "] INFO: Authentication successful.");
                break;
            }
@@ -367,7 +367,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
        )
        enabled_pod = pods[0]

-        self.debug("Cheking if pod has atleast 2 clusters")
+        self.debug("Cheking if pod has at least 2 clusters")
        clusters = Cluster.list(
            self.apiclient,
            podid=enabled_pod.id,

@@ -542,7 +542,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
        """

        # Steps to validate
-        # 0. listStoragePools should have atleast two pools in a single
+        # 0. listStoragePools should have at least two pools in a single
        #    cluster (disable pods/clusters as necessary)
        # 1. create a network offering for redundant router
        # 2. create a network out of this offering

@@ -781,7 +781,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
        """

        # Steps to validate
-        # 0. listHosts should have atleast two hosts in a single cluster
+        # 0. listHosts should have at least two hosts in a single cluster
        #    (disable pods/clusters as necessary)
        # 1. create a network offering for redundant router
        # 2. create a network out of this offering
@@ -1284,7 +1284,7 @@ class TestPathDisableStorage_Cross_Cluster(cloudstackTestCase):
        """
        # Tests in this path requires to be run independently (not to be run in parallel with any other tests \
            since it involves disabling/enabling storage pools and may cause unexpected failures in other tests
-        # This test atleast 2 Clusters in the set up wiht suitable hosts for migration.
+        # This test at least 2 Clusters in the set up wiht suitable hosts for migration.
        # For running the tests on local storage, ensure there are 2 local storage pools set up on each host

        """
@@ -937,7 +937,7 @@ class TestStorageLiveMigrationVmware(cloudstackTestCase):
        storage_pool = []
        #if storage_scope == "across_cluster":
        if count_host < 2 or count_pool < 2:
-            raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have atleast 2 clusters, \
+            raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have at least 2 clusters, \
                    each having min 1 host and 1 vmfs storage pools")

        self.debug("---------------This is the test no 1--------------")

@@ -1220,7 +1220,7 @@ class TestStorageLiveMigrationVmware(cloudstackTestCase):
        pool_local = []
        if scope == "across_cluster":
            if count_host < 2:
-                raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have atleast 2 clusters, each having min 2 hosts ")
+                raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have at least 2 clusters, each having min 2 hosts ")

        self.debug("---------------This is the test no 1--------------")
        """

@@ -1449,7 +1449,7 @@ class TestStorageLiveMigrationVmware(cloudstackTestCase):
            if pool.type == storage_type:
                zwps_pools.append(pool)
        if len(zwps_pools) < 2:
-            raise self.skipTest("The setup doesn't have enough zone wide primary storages of %s type, we need atleast 2" % storage_type)
+            raise self.skipTest("The setup doesn't have enough zone wide primary storages of %s type, we need at least 2" % storage_type)

        count_host = 0
        count_pool = 0

@@ -1470,7 +1470,7 @@ class TestStorageLiveMigrationVmware(cloudstackTestCase):
        pool_vmfs = []
        #if storage_scope == "across_cluster":
        if count_host < 2 | count_pool < 2:
-            raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have atleast 2 clusters, each having min 1 host and 1 vmfs storage pools")
+            raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have at least 2 clusters, each having min 1 host and 1 vmfs storage pools")

        self.debug("---------------This is the test no 1--------------")
        """

@@ -1934,9 +1934,9 @@ class TestStorageLiveMigrationVmware(cloudstackTestCase):
            elif pool.type == storage_type_nfs:
                zwps_nfs_pools.append(pool)
        if len(zwps_vmfs_pools) < 1:
-            raise self.skipTest("The setup doesn't have enough zone wide primary storages of %s type, we need atleast 2" % storage_type_vmfs)
+            raise self.skipTest("The setup doesn't have enough zone wide primary storages of %s type, we need at least 2" % storage_type_vmfs)
        if len(zwps_nfs_pools) < 1:
-            raise self.skipTest("The setup doesn't have enough zone wide primary storages of %s type, we need atleast 2" % storage_type_nfs)
+            raise self.skipTest("The setup doesn't have enough zone wide primary storages of %s type, we need at least 2" % storage_type_nfs)

        count_host = 0
        count_pool_nfs = 0

@@ -1964,7 +1964,7 @@ class TestStorageLiveMigrationVmware(cloudstackTestCase):
        pool_nfs = []
        #if storage_scope == "across_cluster":
        if count_host < 2 or count_pool_vmfs < 2 or count_pool_nfs < 2:
-            raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have atleast 2 clusters, \
+            raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have at least 2 clusters, \
                each having min 2 host 2 vmfs storage pools and 2 nfs storage pools")

        self.debug("---------------This is the test no 1--------------")
@@ -926,7 +926,7 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
        # 'deployVirtualMachine' api with 'ipaddress' as one of the parameters.
        # 4. Acquire public IP in n3 network.
        # 5. Configure PF on the acquired IP and assign it to vm v2
-        # 6. Try to remove nic n2 from v1. Should be successfull
+        # 6. Try to remove nic n2 from v1. Should be successful
        # There was a bug due to both vms has same ip address, so not allowing to remove nic

        vm1 = self.virtual_machine
@@ -599,7 +599,7 @@ class TestEgressFWRules(cloudstackTestCase):
        # 1. deploy VM.using network offering with egress policy true.
        # 2. create egress rule without specific CIDR.
        # 3. login to VM.
-        # 4. access to public network should not be successfull.
+        # 4. access to public network should not be successful.
        self.create_vm()
        self.createEgressRule(cidr=None)
        self.exec_script_on_user_vm('ping -c 1 www.google.com',

@@ -615,7 +615,7 @@ class TestEgressFWRules(cloudstackTestCase):
        # 1. deploy VM.using network offering with egress policy false.
        # 2. create egress rule without specific CIDR.
        # 3. login to VM.
-        # 4. access to public network should be successfull.
+        # 4. access to public network should be successful.
        self.create_vm(egress_policy=False)
        self.createEgressRule(cidr=None)
        self.exec_script_on_user_vm('ping -c 1 www.google.com',

@@ -631,7 +631,7 @@ class TestEgressFWRules(cloudstackTestCase):
        # 1. deploy VM using network offering with egress policy true.
        # 2. create egress rule without specific end port.
        # 3. login to VM.
-        # 4. access to public network should not be successfull.
+        # 4. access to public network should not be successful.
        self.create_vm()
        self.createEgressRule(protocol='tcp', start_port=80, cidr=TestEgressFWRules.zone.guestcidraddress)
        self.exec_script_on_user_vm(' wget -t1 http://apache.claz.org/favicon.ico 2>&1',

@@ -779,7 +779,7 @@ class TestEgressFWRules(cloudstackTestCase):
        # 1. deploy VM using network offering with egress policy true.
        # 2. create egress rule valid cidr and port range.
        # 3. reboot router.
-        # 4. access to public network should not be successfull.
+        # 4. access to public network should not be successful.
        self.create_vm()
        self.createEgressRule(cidr=TestEgressFWRules.zone.guestcidraddress)
        self.reboot_Router()

@@ -796,7 +796,7 @@ class TestEgressFWRules(cloudstackTestCase):
        # 1. deploy VM using network offering with egress policy false.
        # 2. create egress rule valid cidr port range.
        # 3. reboot router.
-        # 4. access to public network should be successfull.
+        # 4. access to public network should be successful.
        self.create_vm(egress_policy=False)
        self.createEgressRule(cidr=TestEgressFWRules.zone.guestcidraddress)
        self.reboot_Router()
@@ -256,7 +256,7 @@ class TestDefaultSecurityGroupEgress(cloudstackTestCase):
        self.assertEqual(
            hasattr(vm_response, "securitygroup"),
            True,
-            "List VM response should have atleast one security group"
+            "List VM response should have at least one security group"
        )

        # Verify listSecurity groups response
@@ -112,7 +112,7 @@ class TestMigrationMaintainedPool(cloudstackTestCase):
            0,
            "Check list Storage pools response"
        )
-        # Check if there are atleast two storage pools , else skip the test
+        # Check if there are at least two storage pools , else skip the test
        if len(storage_pools_response) < 2 :
            self.skipTest("Atleast two storage pools are need to test Storage migration")

@@ -149,7 +149,7 @@ class TestMigrationMaintainedPool(cloudstackTestCase):
        self.assertNotEqual(
            len(pools),
            0,
-            "Check if atleast one pool is suitable for migration"
+            "Check if at least one pool is suitable for migration"
        )
        pool = pools[0]
        self.debug("Migrating Volume-ID: %s to Pool: %s which is in Maintenance mode" % (volume.id, pool.id))
@@ -128,7 +128,7 @@ class _NetScalerAddBase(_NetScalerBase):
            cls.api_client,
            zoneid=cls.zone.id
        )
-        assert isinstance(physical_networks, list), "There should be atleast one physical network for advanced zone"
+        assert isinstance(physical_networks, list), "There should be at least one physical network for advanced zone"
        cls.physical_network = physical_networks[0]

        # Check if a NetScaler network service provider exists - if not add one
@@ -1430,7 +1430,7 @@ class TestMultipleLbRules(cloudstackTestCase):
                self.assertIn(
                    res,
                    [virtual_machine_1.name, virtual_machine_2.name],
-                    "The hostname should match with atleast one of instance name"
+                    "The hostname should match with at least one of instance name"
                )
            except Exception as e:
                self.fail("Exception occurred during SSH: %s - %s" % (

@@ -1451,7 +1451,7 @@ class TestMultipleLbRules(cloudstackTestCase):
                self.assertIn(
                    res,
                    [virtual_machine_1.name, virtual_machine_2.name],
-                    "The hostname should match with atleast one of instance name"
+                    "The hostname should match with at least one of instance name"
                )
            except Exception as e:
                self.fail("Exception occurred during SSH: %s - %s" % (

@@ -1747,7 +1747,7 @@ class TestMultipleLbRulesSameIp(cloudstackTestCase):
                self.assertIn(
                    res,
                    [virtual_machine_1.name, virtual_machine_2.name],
-                    "The hostname should match with atleast one of instance name"
+                    "The hostname should match with at least one of instance name"
                )
            except Exception as e:
                self.fail("Exception occurred during SSH: %s - %s" % (

@@ -1769,7 +1769,7 @@ class TestMultipleLbRulesSameIp(cloudstackTestCase):
                self.assertIn(
                    res,
                    [virtual_machine_1.name, virtual_machine_2.name],
-                    "The hostname should match with atleast one of instance name"
+                    "The hostname should match with at least one of instance name"
                )
            except Exception as e:
                self.fail("Exception occurred during SSH: %s - %s" % (
@@ -260,7 +260,7 @@ class TestAddMultipleNetScaler(cloudstackTestCase):
        self.assertEqual(
            isinstance(physical_networks, list),
            True,
-            "There should be atleast one physical network for advanced zone"
+            "There should be at least one physical network for advanced zone"
        )
        physical_network = physical_networks[0]
        self.debug("Adding netscaler device: %s" %

@@ -435,7 +435,7 @@ class TestAddMultipleNSDiffZone(cloudstackTestCase):
        self.assertEqual(
            isinstance(physical_networks, list),
            True,
-            "There should be atleast one physical network for advanced zone"
+            "There should be at least one physical network for advanced zone"
        )
        self.debug("Adding netscaler device: %s" %
            self.services["netscaler_1"]["ipaddress"])

@@ -478,7 +478,7 @@ class TestAddMultipleNSDiffZone(cloudstackTestCase):
        self.assertEqual(
            isinstance(physical_networks, list),
            True,
-            "There should be atleast one physical network for advanced zone"
+            "There should be at least one physical network for advanced zone"
        )
        physical_network = physical_networks[0]
@@ -1681,7 +1681,7 @@ class TestDeployOnSpecificHost(cloudstackTestCase):
        self.assertEqual(
            isinstance(hosts, list),
            True,
-            "CS should have atleast one host Up and Running"
+            "CS should have at least one host Up and Running"
        )

        host = hosts[0]
@@ -323,7 +323,7 @@ class TestVPCRoutersBasic(cloudstackTestCase):
        )
        if len(hosts) < 2:
            self.skipTest(
-                "No host available for migration. Test requires atleast 2 hosts")
+                "No host available for migration. Test requires at least 2 hosts")

        # Remove the host of current VM from the hosts list
        hosts[:] = [host for host in hosts if host.id != router.hostid]

@@ -975,7 +975,7 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
        )
        if len(hosts) < 2:
            self.skipTest(
-                "No host available for migration. Test requires atleast 2 hosts")
+                "No host available for migration. Test requires at least 2 hosts")

        # Remove the host of current VM from the hosts list
        hosts[:] = [host for host in hosts if host.id != router.hostid]
@@ -136,7 +136,7 @@ class TestRestConfigurationSettings(cloudstackTestCase):
        )

        self.assertIsNotNone(accounts[0],
-                             "There should be atleast 1 account in the zone")
+                             "There should be at least 1 account in the zone")

        config_name = "enable.additional.vm.configuration"
        #1. Get the default value

@@ -199,7 +199,7 @@ class TestRestConfigurationSettings(cloudstackTestCase):
        )

        self.assertIsNotNone(cluster[0],
-                             "There should be atleast 1 cluster in the zone")
+                             "There should be at least 1 cluster in the zone")

        config_name = "cluster.storage.operations.exclude"
        configs = Configurations.list(

@@ -262,7 +262,7 @@ class TestRestConfigurationSettings(cloudstackTestCase):
        )

        self.assertIsNotNone(storage[0],
-                             "There should be atleast 1 primary storage pool in the zone")
+                             "There should be at least 1 primary storage pool in the zone")

        config_name = "vmware.create.full.clone"
        configs = Configurations.list(
@@ -157,7 +157,7 @@ class TestMultipleVolumeSnapshots(cloudstackTestCase):
                        storage pools available in the setup")
            if len(list(storagePool for storagePool in self.pools
                        if storagePool.scope == "CLUSTER")) < 2:
-                self.skipTest("There must be at atleast two cluster wide\
+                self.skipTest("There must be at at least two cluster wide\
                        storage pools available in the setup")
        except Exception as e:
            self.skipTest(e)
@@ -141,7 +141,7 @@ class TestRestoreVM(cloudstackTestCase):
                "Check: Failed to list cluster wide storage pools")

            if len(self.pools) < 2:
-                self.skipTest("There must be at atleast two cluster wide\
+                self.skipTest("There must be at at least two cluster wide\
                        storage pools available in the setup")

        except Exception as e:
@@ -1440,8 +1440,8 @@ class TestHardening(cloudstackTestCase):
            # which indicate two Storage Pools exist.
            assert (len(clusterid_tag_mapping)) >= 2 and\
                (len(tags) for tags in clusterid_tag_mapping.values(
-                )) >= 2, "There must be atleast two Clusters and\
-                each must have atleast two cluster wide storage pools in\
+                )) >= 2, "There must be at least two Clusters and\
+                each must have at least two cluster wide storage pools in\
                Up state in the setup"

        except Exception as e:
@@ -1107,7 +1107,7 @@ public class StressTestDirectAttach {
        if (isAuthenticated == false) {
            return "Authentication failed";
        } else {
-            s_logger.info("Authentication is successfull");
+            s_logger.info("Authentication is successful");
        }

        try {

@@ -2037,7 +2037,7 @@ public class TestClientWithAPI {
        if (isAuthenticated == false) {
            return "Authentication failed";
        } else {
-            s_logger.info("Authentication is successfull");
+            s_logger.info("Authentication is successful");
        }

        try {
@@ -18,7 +18,7 @@ The included VagrantFile will give you:

 1. Due to the large amount of data to be pulled from the Internet, it's probably not a good idea to do this over WiFi or Mobile data.

-1. Given the amount of virtual machines this brings up it is recommended you have atleast 8gb of ram before attempting this.
+1. Given the amount of virtual machines this brings up it is recommended you have at least 8gb of ram before attempting this.

 1. Ensure your system has `git` installed.
File diff suppressed because one or more lines are too long