pre-commit upgrade codespell; fix spelling; (#10144)

John Bampton 2025-11-14 23:17:10 +10:00 committed by GitHub
parent 86ae1fee7f
commit 4ed86a2627
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
39 changed files with 68 additions and 98 deletions

View File

@ -4,6 +4,7 @@ acount
actuall
acuiring
acumulate
addin
addreess
addtion
adminstrator
@ -12,10 +13,8 @@ afrer
afterall
againt
ags
aktive
algoritm
allo
alloacate
allocted
alocation
alogrithm
@ -65,6 +64,7 @@ bject
boardcast
bootstraper
bu
callin
cant
capabilites
capablity
@ -73,6 +73,7 @@ carrefully
cavaet
chaing
checkd
checkin
childs
choosen
chould
@ -93,7 +94,6 @@ confg
configruation
configuable
conneciton
connexion
constrait
constraits
containg
@ -101,9 +101,7 @@ contex
continuesly
contro
controler
controles
controll
convienient
convinience
coputer
correcponding
@ -158,13 +156,13 @@ differnet
differnt
direcotry
directroy
disale
disbale
discrepency
disover
dissapper
dissassociated
divice
dockin
doesn'
doesnot
doesnt
@ -175,7 +173,6 @@ eanbled
earch
ect
elemnt
eles
elments
emmited
enble
@ -187,22 +184,19 @@ environmnet
equivalant
erro
erronous
everthing
everytime
excute
execept
execption
exects
execut
executeable
exeeded
exisitng
exisits
existin
existsing
exitting
expcted
expection
explaination
explicitely
faield
faild
@ -215,7 +209,6 @@ fillled
findout
fisrt
fo
folowing
fowarding
frist
fro
@ -234,6 +227,7 @@ hanling
happend
hasing
hasnt
havin
hda
hostanme
hould
@ -253,18 +247,13 @@ implmeneted
implmentation
incase
includeing
incosistency
indecates
indien
infor
informations
informaton
infrastrcuture
ingore
inital
initalize
initator
initilization
inspite
instace
instal
@ -284,12 +273,8 @@ ist
klunky
lable
leve
lief
limite
linke
listner
lokal
lokales
maintainence
maintenace
maintenence
@ -298,7 +283,6 @@ mambers
manaully
manuel
maxium
mehtod
mergable
mesage
messge
@ -308,7 +292,6 @@ minumum
mis
modifers
mor
mot
mulitply
multipl
multple
@ -322,7 +305,7 @@ nin
nodel
nome
noone
nowe
notin
numbe
numer
occured
@ -390,11 +373,9 @@ remaning
remore
remvoing
renabling
repeatly
reponse
reqest
reqiured
requieres
requried
reserv
reserverd
@ -414,14 +395,13 @@ retuned
returing
rever
rocessor
roperty
runing
runnign
sate
scalled
scipt
scirpt
scrip
seconadry
seconday
seesion
sepcified
@ -434,12 +414,10 @@ settig
sevices
shoul
shoule
sie
signle
simplier
singature
skiping
snaphsot
snpashot
specied
specifed
@ -450,7 +428,6 @@ standy
statics
stickyness
stil
stip
storeage
strat
streched
@ -459,7 +436,6 @@ succesfull
successfull
suceessful
suces
sucessfully
suiteable
suppots
suppport
@ -492,7 +468,6 @@ uncompressible
uneccessarily
unexepected
unexpect
unknow
unkonw
unkown
unneccessary
@ -500,14 +475,12 @@ unparseable
unrecoginized
unsupport
unxpected
updat
uptodate
usera
usign
usin
utlization
vaidate
valiate
valule
valus
varibles
@ -516,8 +489,6 @@ verfying
verifing
virutal
visable
wakup
wil
wit
wll
wth
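
For context, a plain word-per-line list like the one above is the format codespell accepts for an ignore list; whether this particular file is wired into the hook that way is not visible in this diff, so the invocation below is only a hedged sketch with a hypothetical path:

# Hedged sketch: run codespell standalone against such a list (the path is hypothetical).
# --ignore-words takes a file of words codespell should not flag;
# --skip excludes binary or generated artifacts from the scan.
codespell --ignore-words tools/codespell/wordlist.txt --skip '*.png,*.jpg,*.min.js' .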

View File

@ -116,7 +116,7 @@ repos:
args: [--markdown-linebreak-ext=md]
exclude: ^services/console-proxy/rdpconsole/src/test/doc/freerdp-debug-log\.txt$
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
rev: v2.4.1
hooks:
- id: codespell
name: run codespell
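
With the hook revision bumped, the new codespell release can be exercised locally before pushing; a minimal sketch, assuming pre-commit is already installed:

# Rebuild the cached hook environment so the new codespell version is picked up,
# then run only the spelling hook (id "codespell" above) across the whole tree.
pre-commit clean
pre-commit run codespell --all-files

The rev bump itself is the kind of change `pre-commit autoupdate` produces, though this diff does not show how it was generated.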

View File

@ -117,7 +117,7 @@ public class AgentProperties{
/**
* Local storage path.<br>
* This property allows multiple values to be entered in a single String. The differente values must be separated by commas.<br>
* This property allows multiple values to be entered in a single String. The different values must be separated by commas.<br>
* Data type: String.<br>
* Default value: <code>/var/lib/libvirt/images/</code>
*/
@ -134,7 +134,7 @@ public class AgentProperties{
/**
* MANDATORY: The UUID for the local storage pool.<br>
* This property allows multiple values to be entered in a single String. The differente values must be separated by commas.<br>
* This property allows multiple values to be entered in a single String. The different values must be separated by commas.<br>
* Data type: String.<br>
* Default value: <code>null</code>
*/

View File

@ -128,7 +128,7 @@ public class Storage {
public static enum TemplateType {
ROUTING, // Router template
SYSTEM, /* routing, system vm template */
BUILTIN, /* buildin template */
BUILTIN, /* builtin template */
PERHOST, /* every host has this template, don't need to install it in secondary storage */
USER, /* User supplied template/iso */
VNF, /* VNFs (virtual network functions) template */

View File

@ -230,7 +230,7 @@ public interface StorageManager extends StorageService {
/**
* should we execute in sequence not involving any storages?
* @return tru if commands should execute in sequence
* @return true if commands should execute in sequence
*/
static boolean shouldExecuteInSequenceOnVmware() {
return shouldExecuteInSequenceOnVmware(null, null);

View File

@ -42,7 +42,7 @@ public interface VMSnapshotManager extends VMSnapshotService, Manager {
boolean deleteAllVMSnapshots(long id, VMSnapshot.Type type);
/**
* Sync VM snapshot state when VM snapshot in reverting or snapshoting or expunging state
* Sync VM snapshot state when VM snapshot in reverting or snapshotting or expunging state
* Used for fullsync after agent connects
*
* @param vm, the VM in question

View File

@ -159,7 +159,7 @@ public class Upgrade41810to41900 extends DbUpgradeAbstractImpl implements DbUpgr
try (PreparedStatement pstmt = conn.prepareStatement(createNewColumn)) {
pstmt.execute();
} catch (SQLException e) {
String message = String.format("Unable to crate new backups' column date due to [%s].", e.getMessage());
String message = String.format("Unable to create new backups' column date due to [%s].", e.getMessage());
logger.error(message, e);
throw new CloudRuntimeException(message, e);
}

View File

@ -1267,8 +1267,8 @@ namespace CloudStack.Plugin.WmiWrappers.ROOT.VIRTUALIZATION.V2 {
[Browsable(true)]
[DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)]
[Description("The current VMQ offloading usage on this port. The usage is the amount of VMQ res" +
"ources in use on the port.")]
[Description("The current VMQ offloading usage on this port. The usage is the amount of VMQ " +
"resources in use on the port.")]
[TypeConverter(typeof(WMIValueTypeConverter))]
public uint VMQOffloadUsage {
get {

View File

@ -618,10 +618,9 @@ namespace CloudStack.Plugin.WmiWrappers.ROOT.VIRTUALIZATION.V2 {
[Browsable(true)]
[DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)]
[Description("If this property is true, then this Ethernet port can be connected to the switche" +
"s and thus can provide connectivity to virtual machine. If this property is fals" +
"e, then this Ethernet is not being used by the virtual machine networking archit" +
"ecture.")]
[Description("If this property is true, then this Ethernet port can be connected to the switches" +
" and thus can provide connectivity to virtual machine. If this property is false" +
", then this Ethernet is not being used by the virtual machine networking architecture.")]
[TypeConverter(typeof(WMIValueTypeConverter))]
public bool IsBound {
get {

View File

@ -4605,7 +4605,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
vmMo.mountToolsInstaller();
logger.debug(String.format("Successfully re-mounted vmware tools installer for :[%s].", cmd.getVmName()));
} catch (Exception e) {
logger.error(String.format("Unabled to re-mount vmware tools installer for: [%s].", cmd.getVmName()), e);
logger.error(String.format("Unable to re-mount vmware tools installer for: [%s].", cmd.getVmName()), e);
}
}
}
@ -7072,7 +7072,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
if (s_serviceContext.get() != null) {
context = s_serviceContext.get();
String poolKey = VmwareContextPool.composePoolKey(_vCenterAddress, _username);
// Before re-using the thread local context, ensure it corresponds to the right vCenter API session and that it is valid to make calls.
// Before reusing the thread local context, ensure it corresponds to the right vCenter API session and that it is valid to make calls.
if (context.getPoolKey().equals(poolKey)) {
if (context.validate()) {
if (logger.isTraceEnabled()) {

View File

@ -998,11 +998,11 @@ public class VmwareStorageProcessor implements StorageProcessor {
long wait, String nfsVersion) throws Exception {
String volumeFolder;
String volumeName;
String sufix = ".ova";
String suffix = ".ova";
int index = srcVolumePath.lastIndexOf(File.separator);
if (srcVolumePath.endsWith(sufix)) {
if (srcVolumePath.endsWith(suffix)) {
volumeFolder = srcVolumePath.substring(0, index);
volumeName = srcVolumePath.substring(index + 1).replace(sufix, "");
volumeName = srcVolumePath.substring(index + 1).replace(suffix, "");
} else {
volumeFolder = srcVolumePath;
volumeName = srcVolumePath.substring(index + 1);
@ -3790,16 +3790,16 @@ public class VmwareStorageProcessor implements StorageProcessor {
}
DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs);
boolean isDatastoreStoragePolicyComplaint = primaryDsMo.isDatastoreStoragePolicyComplaint(storagePolicyId);
boolean isDatastoreStoragePolicyCompliant = primaryDsMo.isDatastoreStoragePolicyCompliant(storagePolicyId);
String failedMessage = String.format("DataStore %s is not compliance with storage policy id %s", primaryStorageNameLabel, storagePolicyId);
if (!isDatastoreStoragePolicyComplaint)
return new Answer(cmd, isDatastoreStoragePolicyComplaint, failedMessage);
String failedMessage = String.format("DataStore %s is not compliant with storage policy id %s", primaryStorageNameLabel, storagePolicyId);
if (!isDatastoreStoragePolicyCompliant)
return new Answer(cmd, isDatastoreStoragePolicyCompliant, failedMessage);
else
return new Answer(cmd, isDatastoreStoragePolicyComplaint, null);
return new Answer(cmd, isDatastoreStoragePolicyCompliant, null);
} catch (Throwable e) {
hostService.createLogMessageException(e, cmd);
String details = String.format("Exception while checking if datastore [%s] is storage policy [%s] compliance due to: [%s]", primaryStorageNameLabel, storagePolicyId, VmwareHelper.getExceptionMessage(e));
String details = String.format("Exception while checking if datastore [%s] is storage policy [%s] compliant due to: [%s]", primaryStorageNameLabel, storagePolicyId, VmwareHelper.getExceptionMessage(e));
return new Answer(cmd, false, details);
}
}

View File

@ -2908,7 +2908,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
* within a XenServer that's under CloudStack control.
*
* - Native Networks: these are networks that are untagged on the XenServer
* and are used to crate VLAN networks on. These are created by the user and
* and are used to create VLAN networks on. These are created by the user and
* is assumed to be one per cluster. - VLAN Networks: these are dynamically
* created by CloudStack and can have problems with duplicated names. -
* LinkLocal Networks: these are dynamically created by CloudStack and can
@ -3773,7 +3773,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
Host host = pbd.getHost(conn);
if (!isRefNull(host) && StringUtils.equals(host.getUuid(conn), _host.getUuid())) {
if (!pbd.getCurrentlyAttached(conn)) {
logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, pluggin it now", pbd.getUuid(conn), srRec.uuid));
logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, plugging it in now", pbd.getUuid(conn), srRec.uuid));
pbd.plug(conn);
}
logger.debug("Scanning local SR: " + srRec.uuid);

View File

@ -220,7 +220,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
srUuid = sr.getUuid(conn);
Set<PBD> pbDs = sr.getPBDs(conn);
for (PBD pbd : pbDs) {
logger.debug(String.format("Unpluging PBD [%s] of SR [%s] as it is not working properly.", pbd.getUuid(conn), srUuid));
logger.debug(String.format("Unplugging PBD [%s] of SR [%s] as it is not working properly.", pbd.getUuid(conn), srUuid));
unplugPbd(conn, pbd);
}
logger.debug(String.format("Forgetting SR [%s] as it is not working properly.", srUuid));
@ -239,7 +239,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
pbdUuid = pbd.getUuid(conn);
pbd.unplug(conn);
} catch (XenAPIException | XmlRpcException e) {
throw new CloudRuntimeException(String.format("Exception while unpluging PBD [%s].", pbdUuid));
throw new CloudRuntimeException(String.format("Exception while unplugging PBD [%s].", pbdUuid));
}
}

View File

@ -3034,7 +3034,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
mgmtPhyNetwork = _networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(zoneId, TrafficType.Management);
if (NetworkType.Advanced == zone.getNetworkType() && !zone.isSecurityGroupEnabled()) {
// advanced zone without SG should have a physical
// network with public Thpe
// network with public Type
_networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(zoneId, TrafficType.Public);
}

View File

@ -156,7 +156,7 @@ import com.google.gson.JsonParseException;
import static com.cloud.vm.VirtualMachineManager.SystemVmEnableUserData;
/**
* Class to manage console proxys. <br><br>
* Class to manage console proxies. <br><br>
* Possible console proxy state transition cases:<br>
* - Stopped -> Starting -> Running <br>
* - HA -> Stopped -> Starting -> Running <br>
@ -569,7 +569,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
if (!allowToLaunchNew(dataCenterId)) {
String configKey = ConsoleProxyLaunchMax.key();
Integer configValue = ConsoleProxyLaunchMax.valueIn(dataCenterId);
logger.warn(String.format("The number of launched console proxys on zone [%s] has reached the limit [%s]. Limit set in [%s].", dataCenterId, configValue, configKey));
logger.warn(String.format("The number of launched console proxies on zone [%s] has reached the limit [%s]. Limit set in [%s].", dataCenterId, configValue, configKey));
return null;
}

View File

@ -240,7 +240,7 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
}
to.setNicSecIps(secIps);
} else {
logger.warn("Unabled to load NicVO for NicProfile {}", profile);
logger.warn("Unable to load NicVO for NicProfile {}", profile);
//Workaround for dynamically created nics
//FixMe: uuid and secondary IPs can be made part of nic profile
to.setUuid(UUID.randomUUID().toString());

View File

@ -342,7 +342,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase
for (ExternalLoadBalancerDeviceVO lbDevice : lbDevices) {
if (lbDevice.getParentHostId() == hostId) {
throw new CloudRuntimeException(
"This load balancer device can not be deleted as there are one or more load balancers applainces provisioned by cloudstack on the device.");
"This load balancer device can not be deleted as there are one or more load balancers appliances provisioned by cloudstack on the device.");
}
}
}

View File

@ -667,7 +667,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
}
} else if (ipAddress.getAssociatedWithVmId() != null && ipAddress.getAssociatedWithVmId().longValue() != vmId) {
throw new NetworkRuleConflictException("Failed to enable static for the ip address " + ipAddress + " and vm id=" + vmId +
" as it's already assigned to antoher vm");
" as it's already assigned to another vm");
}
//check whether the vm ip is already associated with any public ip address

View File

@ -213,7 +213,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
_storagePoolWorkDao.persist(work);
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug("Work record already exists, re-using by re-setting values");
logger.debug("Work record already exists, reusing by re-setting values");
}
StoragePoolWorkVO work = _storagePoolWorkDao.findByPoolIdAndVmId(pool.getId(), vmInstance.getId());
work.setStartedAfterMaintenance(false);

View File

@ -423,7 +423,7 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
}
/**
* Return pretified PEM certificate
* Return prettified PEM certificate
*/
protected String getPretifiedCertificate(String certificateCer) {
String cert = certificateCer.replaceAll("(.{64})", "$1\n");

View File

@ -197,7 +197,7 @@ public class ConsoleProxyThumbnailHandler implements HttpHandler {
startx = 0;
g.drawString(text, startx, h / 2);
} catch (Throwable e) {
logger.warn("Problem in generating text to thumnail image, return blank image");
logger.warn("Problem in generating text to thumbnail image, return blank image");
}
return img;
}

View File

@ -144,7 +144,7 @@ fw_entry() {
local lb_vif_list=$(get_lb_vif_list)
for vif in $lb_vif_list; do
#TODO : The below delete will be used only when we upgrade the from older version to the newer one , the below delete become obsolute in the future.
#TODO : The below delete will be used only when we upgrade the from older version to the newer one, the below delete become obsolete in the future.
sudo iptables -D INPUT -i $vif -p tcp -d $pubIp --dport $dport -j ACCEPT 2> /dev/null
sudo iptables -A load_balancer_$vif -p tcp -d $pubIp --dport $dport -j ACCEPT
@ -161,7 +161,7 @@ fw_entry() {
sudo iptables -A lb_stats -s $cidrs -p tcp -m state --state NEW -d $pubIp --dport $dport -j ACCEPT
#TODO : The below delete in the for-loop will be used only when we upgrade the from older version to the newer one , the below delete become obsolute in the future.
#TODO : The below delete in the for-loop will be used only when we upgrade the from older version to the newer one, the below delete become obsolete in the future.
for i in $r
do
local pubIp=$(echo $i | cut -d: -f1)

View File

@ -575,7 +575,7 @@ class TestBaseImageUpdate(cloudstackTestCase):
)
sleep_seconds = (self.services["recurring_snapshot"]["schedule"]) * 3600 + 600
sleep_minutes = sleep_seconds/60
self.debug("Sleeping for %s minutes till the volume is snapshoted" %sleep_minutes)
self.debug("Sleeping for %s minutes till the volume is snapshotted" %sleep_minutes)
time.sleep(sleep_seconds)
retriesCount = self.services["retriesCount"]

View File

@ -1165,8 +1165,8 @@ class TestInvalidAccountAuthroize(cloudstackTestCase):
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_invalid_account_authroize(self):
"""Test invalid account authroize
def test_invalid_account_authorize(self):
"""Test invalid account authorize
"""

View File

@ -4341,7 +4341,7 @@ class TestIpAddresses(cloudstackTestCase):
vmgroup_disabled,
"Failed to disable Autoscale VM group"
)
# Verifyign the state of the VM Group afte renabling
# Verifying the state of the VM Group after renabling
self.assertEqual(
"disabled",
vmgroup_disabled.state,
@ -4375,7 +4375,7 @@ class TestIpAddresses(cloudstackTestCase):
vmgroup_enabled,
"Failed to enable Autoscale VM group"
)
# Verifyign the state of the VM Group afte renabling
# Verifying the state of the VM Group after renabling
self.assertEqual(
"enabled",
vmgroup_enabled.state,

View File

@ -15,7 +15,7 @@
# specific language governing permissions and limitations
# under the License.
""" P1 for stopped Virtual Maschine life cycle
""" P1 for stopped Virtual Machine life cycle
"""
# Import Local Modules
from nose.plugins.attrib import attr

View File

@ -440,5 +440,5 @@ class TestMigrateVolumeToAnotherPool(cloudstackTestCase):
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
"Check the random data is equal with the random file!"
)

View File

@ -472,7 +472,7 @@ class TestEncryptedVolumes(cloudstackTestCase):
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
"Check the random data is equal with the random file!"
)
# Delete VM snapshot

View File

@ -1136,7 +1136,7 @@ class TestStoragePool(cloudstackTestCase):
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
"Check the random data is equal with the random file!"
)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")

View File

@ -465,7 +465,7 @@ class TestStoragePool(cloudstackTestCase):
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
"Check the random data is equal with the random file!"
)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")

View File

@ -336,7 +336,7 @@ class TestVmSnapshot(cloudstackTestCase):
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
"Check the random data is equal with the random file!"
)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")

View File

@ -262,7 +262,7 @@ class TestVmSnapshot(cloudstackTestCase):
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
"Check the random data is equal with the random file!"
)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")

View File

@ -988,7 +988,7 @@ class TestVolumes(cloudstackTestCase):
return True, list_volume_response[0]
return False, None
# sleep interval is 1s, retries is 360, this will sleep atmost 360 seconds, or 6 mins
# sleep interval is 1s, retries is 360, this will sleep at most 360 seconds, or 6 mins
res, response = wait_until(1, 360, checkVolumeResponse)
if not res:
self.fail("Failed to return root volume response")

View File

@ -2809,7 +2809,7 @@ class TestLiveStorageMigration(cloudstackTestCase):
disk=root_volume_cluster,
disk_type="rootdiskdevice")
# Get Destnation Pool
# Get Destination Pool
# Avoid storage Pool on which ROOT disk exists
storagePools_to_avoid = [root_volume_cluster.storage]
@ -2840,7 +2840,7 @@ class TestLiveStorageMigration(cloudstackTestCase):
disk=data_volume_1_cluster,
disk_type="datadiskdevice_1")
# Get Destnation Pool
# Get Destination Pool
# Avoid storage Pool allocated for ROOT disk, and Pool on which DATA
# disk1 exists
storagePools_to_avoid = [
@ -3021,7 +3021,7 @@ class TestLiveStorageMigration(cloudstackTestCase):
disk=root_volume_cluster,
disk_type="rootdiskdevice")
# Get Destnation Pool
# Get Destination Pool
# Avoid storage Pool on which ROOT disk exists
storagePools_to_avoid = [root_volume_cluster.storage]
@ -3052,7 +3052,7 @@ class TestLiveStorageMigration(cloudstackTestCase):
disk=data_volume_1_cluster,
disk_type="datadiskdevice_1")
# Get Destnation Pool
# Get Destination Pool
# Avoid storage Pool allocated for ROOT disk, and Pool on which DATA
# disk1 exists

View File

@ -60,7 +60,7 @@ deploy_server() {
if [ $? -gt 0 ]; then echo "failed to setup db.properties file on remote $1"; return 2; fi
#ssh root@$1 "cd /root/cloudstack-oss && nohup ant run &"
#if [ $? -gt 0 ]; then echo "failed to start the softare on remote $1"; return 2; fi
#if [ $? -gt 0 ]; then echo "failed to start the software on remote $1"; return 2; fi
echo "Remote management server is deployed as a part of cluster setup; you have to start it manually by logging in remotely"
}

View File

@ -164,7 +164,7 @@ def cleanPrimaryStorage(cscfg):
def seedSecondaryStorage(cscfg, hypervisor):
"""
erase secondary store and seed system VM template via puppet. The
secseeder.sh script is executed on mgmt server bootup which will mount and
secseeder.sh script is executed on mgmt server boot up which will mount and
place the system VM templates on the NFS
"""
mgmt_server = cscfg.mgtSvr[0].mgtSvrIp

File diff suppressed because one or more lines are too long

View File

@ -75,7 +75,7 @@ public class BasicRestClient implements RestClient {
final URI uri = request.getURI();
String query = uri.getQuery();
query = query != null ? "?" + query : "";
logger.debug("Executig " + request.getMethod() + " request on " + clientContext.getTargetHost() + uri.getPath() + query);
logger.debug("Executing " + request.getMethod() + " request on " + clientContext.getTargetHost() + uri.getPath() + query);
}
@Override

View File

@ -451,7 +451,7 @@ public class DatastoreMO extends BaseMO {
return isAccessible;
}
public boolean isDatastoreStoragePolicyComplaint(String storagePolicyId) throws Exception {
public boolean isDatastoreStoragePolicyCompliant(String storagePolicyId) throws Exception {
PbmProfileManagerMO profMgrMo = new PbmProfileManagerMO(_context);
PbmProfile profile = profMgrMo.getStorageProfile(storagePolicyId);