pre-commit upgrade codespell; fix spelling; (#10144)

John Bampton 2025-11-14 23:17:10 +10:00 committed by GitHub
parent 86ae1fee7f
commit 4ed86a2627
39 changed files with 68 additions and 98 deletions

View File

@@ -4,6 +4,7 @@ acount
 actuall
 acuiring
 acumulate
+addin
 addreess
 addtion
 adminstrator
@@ -12,10 +13,8 @@ afrer
 afterall
 againt
 ags
-aktive
 algoritm
 allo
-alloacate
 allocted
 alocation
 alogrithm
@@ -65,6 +64,7 @@ bject
 boardcast
 bootstraper
 bu
+callin
 cant
 capabilites
 capablity
@@ -73,6 +73,7 @@ carrefully
 cavaet
 chaing
 checkd
+checkin
 childs
 choosen
 chould
@@ -93,7 +94,6 @@ confg
 configruation
 configuable
 conneciton
-connexion
 constrait
 constraits
 containg
@@ -101,9 +101,7 @@ contex
 continuesly
 contro
 controler
-controles
 controll
-convienient
 convinience
 coputer
 correcponding
@@ -158,13 +156,13 @@ differnet
 differnt
 direcotry
 directroy
-disale
 disbale
 discrepency
 disover
 dissapper
 dissassociated
 divice
+dockin
 doesn'
 doesnot
 doesnt
@@ -175,7 +173,6 @@ eanbled
 earch
 ect
 elemnt
-eles
 elments
 emmited
 enble
@@ -187,22 +184,19 @@ environmnet
 equivalant
 erro
 erronous
-everthing
 everytime
 excute
 execept
 execption
-exects
 execut
 executeable
 exeeded
 exisitng
 exisits
+existin
 existsing
-exitting
 expcted
 expection
-explaination
 explicitely
 faield
 faild
@@ -215,7 +209,6 @@ fillled
 findout
 fisrt
 fo
-folowing
 fowarding
 frist
 fro
@@ -234,6 +227,7 @@ hanling
 happend
 hasing
 hasnt
+havin
 hda
 hostanme
 hould
@@ -253,18 +247,13 @@ implmeneted
 implmentation
 incase
 includeing
-incosistency
 indecates
-indien
 infor
 informations
 informaton
-infrastrcuture
 ingore
-inital
 initalize
 initator
-initilization
 inspite
 instace
 instal
@@ -284,12 +273,8 @@ ist
 klunky
 lable
 leve
-lief
 limite
-linke
 listner
-lokal
-lokales
 maintainence
 maintenace
 maintenence
@@ -298,7 +283,6 @@ mambers
 manaully
 manuel
 maxium
-mehtod
 mergable
 mesage
 messge
@@ -308,7 +292,6 @@ minumum
 mis
 modifers
 mor
-mot
 mulitply
 multipl
 multple
@@ -322,7 +305,7 @@ nin
 nodel
 nome
 noone
-nowe
+notin
 numbe
 numer
 occured
@@ -390,11 +373,9 @@ remaning
 remore
 remvoing
 renabling
-repeatly
 reponse
 reqest
 reqiured
-requieres
 requried
 reserv
 reserverd
@@ -414,14 +395,13 @@ retuned
 returing
 rever
 rocessor
+roperty
 runing
 runnign
 sate
 scalled
-scipt
 scirpt
 scrip
-seconadry
 seconday
 seesion
 sepcified
@@ -434,12 +414,10 @@ settig
 sevices
 shoul
 shoule
-sie
 signle
 simplier
 singature
 skiping
-snaphsot
 snpashot
 specied
 specifed
@@ -450,7 +428,6 @@ standy
 statics
 stickyness
 stil
-stip
 storeage
 strat
 streched
@@ -459,7 +436,6 @@ succesfull
 successfull
 suceessful
 suces
-sucessfully
 suiteable
 suppots
 suppport
@@ -492,7 +468,6 @@ uncompressible
 uneccessarily
 unexepected
 unexpect
-unknow
 unkonw
 unkown
 unneccessary
@@ -500,14 +475,12 @@ unparseable
 unrecoginized
 unsupport
 unxpected
-updat
 uptodate
 usera
 usign
 usin
 utlization
 vaidate
-valiate
 valule
 valus
 varibles
@@ -516,8 +489,6 @@ verfying
 verifing
 virutal
 visable
-wakup
 wil
 wit
-wll
 wth
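The file above reads like the project's supplementary word list for codespell; this view does not show its path, so the location used below is only a placeholder. A list of this shape is normally wired in through codespell's --ignore-words option, roughly like this (illustrative sketch, assuming codespell 2.4.1 and a hypothetical tools/codespell/ignore-words.txt):

    # Illustrative only: check the tree against a project word list (path assumed, not taken from this commit)
    pip install codespell==2.4.1
    codespell --ignore-words tools/codespell/ignore-words.txt --skip "*.git,*.min.js" .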

View File

@@ -116,7 +116,7 @@ repos:
         args: [--markdown-linebreak-ext=md]
         exclude: ^services/console-proxy/rdpconsole/src/test/doc/freerdp-debug-log\.txt$
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.6
+    rev: v2.4.1
     hooks:
       - id: codespell
         name: run codespell
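The hunk above bumps the pinned codespell hook from v2.2.6 to v2.4.1. A bump like this is typically applied and then verified with pre-commit itself; a minimal sketch using standard pre-commit commands (not taken from this repository's docs):

    # Refresh the pinned rev for the codespell hook, then re-run it across the whole tree
    pre-commit autoupdate --repo https://github.com/codespell-project/codespell
    pre-commit run codespell --all-files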

View File

@@ -117,7 +117,7 @@ public class AgentProperties{
     /**
      * Local storage path.<br>
-     * This property allows multiple values to be entered in a single String. The differente values must be separated by commas.<br>
+     * This property allows multiple values to be entered in a single String. The different values must be separated by commas.<br>
      * Data type: String.<br>
      * Default value: <code>/var/lib/libvirt/images/</code>
      */
@@ -134,7 +134,7 @@ public class AgentProperties{
     /**
      * MANDATORY: The UUID for the local storage pool.<br>
-     * This property allows multiple values to be entered in a single String. The differente values must be separated by commas.<br>
+     * This property allows multiple values to be entered in a single String. The different values must be separated by commas.<br>
      * Data type: String.<br>
      * Default value: <code>null</code>
      */

View File

@@ -128,7 +128,7 @@ public class Storage {
     public static enum TemplateType {
         ROUTING, // Router template
         SYSTEM, /* routing, system vm template */
-        BUILTIN, /* buildin template */
+        BUILTIN, /* builtin template */
         PERHOST, /* every host has this template, don't need to install it in secondary storage */
         USER, /* User supplied template/iso */
         VNF, /* VNFs (virtual network functions) template */

View File

@@ -230,7 +230,7 @@ public interface StorageManager extends StorageService {
     /**
      * should we execute in sequence not involving any storages?
-     * @return tru if commands should execute in sequence
+     * @return true if commands should execute in sequence
      */
     static boolean shouldExecuteInSequenceOnVmware() {
         return shouldExecuteInSequenceOnVmware(null, null);

View File

@@ -42,7 +42,7 @@ public interface VMSnapshotManager extends VMSnapshotService, Manager {
     boolean deleteAllVMSnapshots(long id, VMSnapshot.Type type);
     /**
-     * Sync VM snapshot state when VM snapshot in reverting or snapshoting or expunging state
+     * Sync VM snapshot state when VM snapshot in reverting or snapshotting or expunging state
      * Used for fullsync after agent connects
      *
      * @param vm, the VM in question

View File

@@ -159,7 +159,7 @@ public class Upgrade41810to41900 extends DbUpgradeAbstractImpl implements DbUpgr
         try (PreparedStatement pstmt = conn.prepareStatement(createNewColumn)) {
             pstmt.execute();
         } catch (SQLException e) {
-            String message = String.format("Unable to crate new backups' column date due to [%s].", e.getMessage());
+            String message = String.format("Unable to create new backups' column date due to [%s].", e.getMessage());
             logger.error(message, e);
             throw new CloudRuntimeException(message, e);
         }

View File

@@ -1267,8 +1267,8 @@ namespace CloudStack.Plugin.WmiWrappers.ROOT.VIRTUALIZATION.V2 {
         [Browsable(true)]
         [DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)]
-        [Description("The current VMQ offloading usage on this port. The usage is the amount of VMQ res" +
-            "ources in use on the port.")]
+        [Description("The current VMQ offloading usage on this port. The usage is the amount of VMQ " +
+            "resources in use on the port.")]
         [TypeConverter(typeof(WMIValueTypeConverter))]
         public uint VMQOffloadUsage {
             get {

View File

@@ -618,10 +618,9 @@ namespace CloudStack.Plugin.WmiWrappers.ROOT.VIRTUALIZATION.V2 {
         [Browsable(true)]
         [DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)]
-        [Description("If this property is true, then this Ethernet port can be connected to the switche" +
-            "s and thus can provide connectivity to virtual machine. If this property is fals" +
-            "e, then this Ethernet is not being used by the virtual machine networking archit" +
-            "ecture.")]
+        [Description("If this property is true, then this Ethernet port can be connected to the switches" +
+            " and thus can provide connectivity to virtual machine. If this property is false" +
+            ", then this Ethernet is not being used by the virtual machine networking architecture.")]
         [TypeConverter(typeof(WMIValueTypeConverter))]
         public bool IsBound {
             get {

View File

@@ -4605,7 +4605,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
                 vmMo.mountToolsInstaller();
                 logger.debug(String.format("Successfully re-mounted vmware tools installer for :[%s].", cmd.getVmName()));
             } catch (Exception e) {
-                logger.error(String.format("Unabled to re-mount vmware tools installer for: [%s].", cmd.getVmName()), e);
+                logger.error(String.format("Unable to re-mount vmware tools installer for: [%s].", cmd.getVmName()), e);
             }
         }
     }
@@ -7072,7 +7072,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
         if (s_serviceContext.get() != null) {
             context = s_serviceContext.get();
             String poolKey = VmwareContextPool.composePoolKey(_vCenterAddress, _username);
-            // Before re-using the thread local context, ensure it corresponds to the right vCenter API session and that it is valid to make calls.
+            // Before reusing the thread local context, ensure it corresponds to the right vCenter API session and that it is valid to make calls.
             if (context.getPoolKey().equals(poolKey)) {
                 if (context.validate()) {
                     if (logger.isTraceEnabled()) {

View File

@@ -998,11 +998,11 @@ public class VmwareStorageProcessor implements StorageProcessor {
             long wait, String nfsVersion) throws Exception {
         String volumeFolder;
         String volumeName;
-        String sufix = ".ova";
+        String suffix = ".ova";
         int index = srcVolumePath.lastIndexOf(File.separator);
-        if (srcVolumePath.endsWith(sufix)) {
+        if (srcVolumePath.endsWith(suffix)) {
             volumeFolder = srcVolumePath.substring(0, index);
-            volumeName = srcVolumePath.substring(index + 1).replace(sufix, "");
+            volumeName = srcVolumePath.substring(index + 1).replace(suffix, "");
         } else {
             volumeFolder = srcVolumePath;
             volumeName = srcVolumePath.substring(index + 1);
@@ -3790,16 +3790,16 @@ public class VmwareStorageProcessor implements StorageProcessor {
             }
             DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs);
-            boolean isDatastoreStoragePolicyComplaint = primaryDsMo.isDatastoreStoragePolicyComplaint(storagePolicyId);
-            String failedMessage = String.format("DataStore %s is not compliance with storage policy id %s", primaryStorageNameLabel, storagePolicyId);
-            if (!isDatastoreStoragePolicyComplaint)
-                return new Answer(cmd, isDatastoreStoragePolicyComplaint, failedMessage);
+            boolean isDatastoreStoragePolicyCompliant = primaryDsMo.isDatastoreStoragePolicyCompliant(storagePolicyId);
+            String failedMessage = String.format("DataStore %s is not compliant with storage policy id %s", primaryStorageNameLabel, storagePolicyId);
+            if (!isDatastoreStoragePolicyCompliant)
+                return new Answer(cmd, isDatastoreStoragePolicyCompliant, failedMessage);
             else
-                return new Answer(cmd, isDatastoreStoragePolicyComplaint, null);
+                return new Answer(cmd, isDatastoreStoragePolicyCompliant, null);
         } catch (Throwable e) {
             hostService.createLogMessageException(e, cmd);
-            String details = String.format("Exception while checking if datastore [%s] is storage policy [%s] compliance due to: [%s]", primaryStorageNameLabel, storagePolicyId, VmwareHelper.getExceptionMessage(e));
+            String details = String.format("Exception while checking if datastore [%s] is storage policy [%s] compliant due to: [%s]", primaryStorageNameLabel, storagePolicyId, VmwareHelper.getExceptionMessage(e));
             return new Answer(cmd, false, details);
         }
     }

View File

@@ -2908,7 +2908,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
      * within a XenServer that's under CloudStack control.
      *
      * - Native Networks: these are networks that are untagged on the XenServer
-     * and are used to crate VLAN networks on. These are created by the user and
+     * and are used to create VLAN networks on. These are created by the user and
      * is assumed to be one per cluster. - VLAN Networks: these are dynamically
      * created by CloudStack and can have problems with duplicated names. -
      * LinkLocal Networks: these are dynamically created by CloudStack and can
@@ -3773,7 +3773,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
             Host host = pbd.getHost(conn);
             if (!isRefNull(host) && StringUtils.equals(host.getUuid(conn), _host.getUuid())) {
                 if (!pbd.getCurrentlyAttached(conn)) {
-                    logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, pluggin it now", pbd.getUuid(conn), srRec.uuid));
+                    logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, plugging it in now", pbd.getUuid(conn), srRec.uuid));
                     pbd.plug(conn);
                 }
                 logger.debug("Scanning local SR: " + srRec.uuid);

View File

@@ -220,7 +220,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
             srUuid = sr.getUuid(conn);
             Set<PBD> pbDs = sr.getPBDs(conn);
             for (PBD pbd : pbDs) {
-                logger.debug(String.format("Unpluging PBD [%s] of SR [%s] as it is not working properly.", pbd.getUuid(conn), srUuid));
+                logger.debug(String.format("Unplugging PBD [%s] of SR [%s] as it is not working properly.", pbd.getUuid(conn), srUuid));
                 unplugPbd(conn, pbd);
             }
             logger.debug(String.format("Forgetting SR [%s] as it is not working properly.", srUuid));
@@ -239,7 +239,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
             pbdUuid = pbd.getUuid(conn);
             pbd.unplug(conn);
         } catch (XenAPIException | XmlRpcException e) {
-            throw new CloudRuntimeException(String.format("Exception while unpluging PBD [%s].", pbdUuid));
+            throw new CloudRuntimeException(String.format("Exception while unplugging PBD [%s].", pbdUuid));
         }
     }

View File

@@ -3034,7 +3034,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         mgmtPhyNetwork = _networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(zoneId, TrafficType.Management);
         if (NetworkType.Advanced == zone.getNetworkType() && !zone.isSecurityGroupEnabled()) {
             // advanced zone without SG should have a physical
-            // network with public Thpe
+            // network with public Type
             _networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(zoneId, TrafficType.Public);
         }

View File

@@ -156,7 +156,7 @@ import com.google.gson.JsonParseException;
 import static com.cloud.vm.VirtualMachineManager.SystemVmEnableUserData;
 /**
- * Class to manage console proxys. <br><br>
+ * Class to manage console proxies. <br><br>
  * Possible console proxy state transition cases:<br>
  * - Stopped -> Starting -> Running <br>
  * - HA -> Stopped -> Starting -> Running <br>
@@ -569,7 +569,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
         if (!allowToLaunchNew(dataCenterId)) {
             String configKey = ConsoleProxyLaunchMax.key();
             Integer configValue = ConsoleProxyLaunchMax.valueIn(dataCenterId);
-            logger.warn(String.format("The number of launched console proxys on zone [%s] has reached the limit [%s]. Limit set in [%s].", dataCenterId, configValue, configKey));
+            logger.warn(String.format("The number of launched console proxies on zone [%s] has reached the limit [%s]. Limit set in [%s].", dataCenterId, configValue, configKey));
             return null;
         }

View File

@@ -240,7 +240,7 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
             }
             to.setNicSecIps(secIps);
         } else {
-            logger.warn("Unabled to load NicVO for NicProfile {}", profile);
+            logger.warn("Unable to load NicVO for NicProfile {}", profile);
             //Workaround for dynamically created nics
             //FixMe: uuid and secondary IPs can be made part of nic profile
             to.setUuid(UUID.randomUUID().toString());

View File

@@ -342,7 +342,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase
         for (ExternalLoadBalancerDeviceVO lbDevice : lbDevices) {
             if (lbDevice.getParentHostId() == hostId) {
                 throw new CloudRuntimeException(
-                    "This load balancer device can not be deleted as there are one or more load balancers applainces provisioned by cloudstack on the device.");
+                    "This load balancer device can not be deleted as there are one or more load balancers appliances provisioned by cloudstack on the device.");
             }
         }
     }

View File

@@ -667,7 +667,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
             }
         } else if (ipAddress.getAssociatedWithVmId() != null && ipAddress.getAssociatedWithVmId().longValue() != vmId) {
             throw new NetworkRuleConflictException("Failed to enable static for the ip address " + ipAddress + " and vm id=" + vmId +
-                " as it's already assigned to antoher vm");
+                " as it's already assigned to another vm");
         }
         //check whether the vm ip is already associated with any public ip address

View File

@@ -213,7 +213,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
                 _storagePoolWorkDao.persist(work);
             } catch (Exception e) {
                 if (logger.isDebugEnabled()) {
-                    logger.debug("Work record already exists, re-using by re-setting values");
+                    logger.debug("Work record already exists, reusing by re-setting values");
                 }
                 StoragePoolWorkVO work = _storagePoolWorkDao.findByPoolIdAndVmId(pool.getId(), vmInstance.getId());
                 work.setStartedAfterMaintenance(false);

View File

@@ -423,7 +423,7 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
     }
     /**
-     * Return pretified PEM certificate
+     * Return prettified PEM certificate
      */
     protected String getPretifiedCertificate(String certificateCer) {
         String cert = certificateCer.replaceAll("(.{64})", "$1\n");
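The replaceAll above inserts a line break after every 64 characters, which is the conventional line width for the base64 body of a PEM certificate. As a rough command-line illustration of the same idea (not part of this change; cert-body.txt is a hypothetical file holding the unwrapped base64 text):

    # Re-wrap a single-line base64 certificate body at 64 columns
    fold -w 64 cert-body.txt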

View File

@@ -197,7 +197,7 @@ public class ConsoleProxyThumbnailHandler implements HttpHandler {
             startx = 0;
             g.drawString(text, startx, h / 2);
         } catch (Throwable e) {
-            logger.warn("Problem in generating text to thumnail image, return blank image");
+            logger.warn("Problem in generating text to thumbnail image, return blank image");
         }
         return img;
     }

View File

@@ -144,7 +144,7 @@ fw_entry() {
   local lb_vif_list=$(get_lb_vif_list)
   for vif in $lb_vif_list; do
-  #TODO : The below delete will be used only when we upgrade the from older version to the newer one , the below delete become obsolute in the future.
+  #TODO : The below delete will be used only when we upgrade the from older version to the newer one, the below delete become obsolete in the future.
   sudo iptables -D INPUT -i $vif -p tcp -d $pubIp --dport $dport -j ACCEPT 2> /dev/null
   sudo iptables -A load_balancer_$vif -p tcp -d $pubIp --dport $dport -j ACCEPT
@@ -161,7 +161,7 @@ fw_entry() {
   sudo iptables -A lb_stats -s $cidrs -p tcp -m state --state NEW -d $pubIp --dport $dport -j ACCEPT
-  #TODO : The below delete in the for-loop will be used only when we upgrade the from older version to the newer one , the below delete become obsolute in the future.
+  #TODO : The below delete in the for-loop will be used only when we upgrade the from older version to the newer one, the below delete become obsolete in the future.
   for i in $r
   do
     local pubIp=$(echo $i | cut -d: -f1)

View File

@@ -575,7 +575,7 @@ class TestBaseImageUpdate(cloudstackTestCase):
         )
         sleep_seconds = (self.services["recurring_snapshot"]["schedule"]) * 3600 + 600
         sleep_minutes = sleep_seconds/60
-        self.debug("Sleeping for %s minutes till the volume is snapshoted" %sleep_minutes)
+        self.debug("Sleeping for %s minutes till the volume is snapshotted" %sleep_minutes)
         time.sleep(sleep_seconds)
         retriesCount = self.services["retriesCount"]

View File

@@ -1165,8 +1165,8 @@ class TestInvalidAccountAuthroize(cloudstackTestCase):
         return
     @attr(tags = ["sg", "eip", "advancedsg"])
-    def test_invalid_account_authroize(self):
-        """Test invalid account authroize
+    def test_invalid_account_authorize(self):
+        """Test invalid account authorize
         """

View File

@@ -4341,7 +4341,7 @@ class TestIpAddresses(cloudstackTestCase):
             vmgroup_disabled,
             "Failed to disable Autoscale VM group"
         )
-        # Verifyign the state of the VM Group afte renabling
+        # Verifying the state of the VM Group after renabling
         self.assertEqual(
             "disabled",
             vmgroup_disabled.state,
@@ -4375,7 +4375,7 @@ class TestIpAddresses(cloudstackTestCase):
             vmgroup_enabled,
             "Failed to enable Autoscale VM group"
         )
-        # Verifyign the state of the VM Group afte renabling
+        # Verifying the state of the VM Group after renabling
         self.assertEqual(
             "enabled",
             vmgroup_enabled.state,

View File

@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
-""" P1 for stopped Virtual Maschine life cycle
+""" P1 for stopped Virtual Machine life cycle
 """
 # Import Local Modules
 from nose.plugins.attrib import attr

View File

@@ -440,5 +440,5 @@ class TestMigrateVolumeToAnotherPool(cloudstackTestCase):
         self.assertEqual(
             self.random_data_0,
             result[0],
-            "Check the random data is equal with the ramdom file!"
+            "Check the random data is equal with the random file!"
         )

View File

@@ -472,7 +472,7 @@ class TestEncryptedVolumes(cloudstackTestCase):
         self.assertEqual(
             self.random_data_0,
             result[0],
-            "Check the random data is equal with the ramdom file!"
+            "Check the random data is equal with the random file!"
         )
         # Delete VM snapshot

View File

@@ -1136,7 +1136,7 @@ class TestStoragePool(cloudstackTestCase):
         self.assertEqual(
             self.random_data_0,
             result[0],
-            "Check the random data is equal with the ramdom file!"
+            "Check the random data is equal with the random file!"
         )
     @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")

View File

@@ -465,7 +465,7 @@ class TestStoragePool(cloudstackTestCase):
         self.assertEqual(
             self.random_data_0,
             result[0],
-            "Check the random data is equal with the ramdom file!"
+            "Check the random data is equal with the random file!"
         )
     @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")

View File

@@ -336,7 +336,7 @@ class TestVmSnapshot(cloudstackTestCase):
         self.assertEqual(
             self.random_data_0,
             result[0],
-            "Check the random data is equal with the ramdom file!"
+            "Check the random data is equal with the random file!"
         )
     @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")

View File

@@ -262,7 +262,7 @@ class TestVmSnapshot(cloudstackTestCase):
         self.assertEqual(
             self.random_data_0,
             result[0],
-            "Check the random data is equal with the ramdom file!"
+            "Check the random data is equal with the random file!"
         )
     @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")

View File

@@ -988,7 +988,7 @@ class TestVolumes(cloudstackTestCase):
                 return True, list_volume_response[0]
             return False, None
-        # sleep interval is 1s, retries is 360, this will sleep atmost 360 seconds, or 6 mins
+        # sleep interval is 1s, retries is 360, this will sleep at most 360 seconds, or 6 mins
         res, response = wait_until(1, 360, checkVolumeResponse)
         if not res:
             self.fail("Failed to return root volume response")

View File

@@ -2809,7 +2809,7 @@ class TestLiveStorageMigration(cloudstackTestCase):
             disk=root_volume_cluster,
             disk_type="rootdiskdevice")
-        # Get Destnation Pool
+        # Get Destination Pool
         # Avoid storage Pool on which ROOT disk exists
         storagePools_to_avoid = [root_volume_cluster.storage]
@@ -2840,7 +2840,7 @@ class TestLiveStorageMigration(cloudstackTestCase):
             disk=data_volume_1_cluster,
             disk_type="datadiskdevice_1")
-        # Get Destnation Pool
+        # Get Destination Pool
         # Avoid storage Pool allocated for ROOT disk, and Pool on which DATA
         # disk1 exists
         storagePools_to_avoid = [
@@ -3021,7 +3021,7 @@ class TestLiveStorageMigration(cloudstackTestCase):
             disk=root_volume_cluster,
             disk_type="rootdiskdevice")
-        # Get Destnation Pool
+        # Get Destination Pool
         # Avoid storage Pool on which ROOT disk exists
         storagePools_to_avoid = [root_volume_cluster.storage]
@@ -3052,7 +3052,7 @@ class TestLiveStorageMigration(cloudstackTestCase):
             disk=data_volume_1_cluster,
             disk_type="datadiskdevice_1")
-        # Get Destnation Pool
+        # Get Destination Pool
         # Avoid storage Pool allocated for ROOT disk, and Pool on which DATA
         # disk1 exists

View File

@@ -60,7 +60,7 @@ deploy_server() {
   if [ $? -gt 0 ]; then echo "failed to setup db.properties file on remote $1"; return 2; fi
   #ssh root@$1 "cd /root/cloudstack-oss && nohup ant run &"
-  #if [ $? -gt 0 ]; then echo "failed to start the softare on remote $1"; return 2; fi
+  #if [ $? -gt 0 ]; then echo "failed to start the software on remote $1"; return 2; fi
   echo "Remote management server is deployed as a part of cluster setup; you have to start it manually by logging in remotely"
 }

View File

@@ -164,7 +164,7 @@ def cleanPrimaryStorage(cscfg):
 def seedSecondaryStorage(cscfg, hypervisor):
     """
     erase secondary store and seed system VM template via puppet. The
-    secseeder.sh script is executed on mgmt server bootup which will mount and
+    secseeder.sh script is executed on mgmt server boot up which will mount and
     place the system VM templates on the NFS
     """
     mgmt_server = cscfg.mgtSvr[0].mgtSvrIp

File diff suppressed because one or more lines are too long

View File

@@ -75,7 +75,7 @@ public class BasicRestClient implements RestClient {
         final URI uri = request.getURI();
         String query = uri.getQuery();
         query = query != null ? "?" + query : "";
-        logger.debug("Executig " + request.getMethod() + " request on " + clientContext.getTargetHost() + uri.getPath() + query);
+        logger.debug("Executing " + request.getMethod() + " request on " + clientContext.getTargetHost() + uri.getPath() + query);
     }
     @Override

View File

@@ -451,7 +451,7 @@ public class DatastoreMO extends BaseMO {
         return isAccessible;
     }
-    public boolean isDatastoreStoragePolicyComplaint(String storagePolicyId) throws Exception {
+    public boolean isDatastoreStoragePolicyCompliant(String storagePolicyId) throws Exception {
         PbmProfileManagerMO profMgrMo = new PbmProfileManagerMO(_context);
         PbmProfile profile = profMgrMo.getStorageProfile(storagePolicyId);