Mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)
Fix spelling (#6064)
* Fix spelling
  - `interupted` to `interrupted`
  - `paramter` to `parameter`
* Fix more typos
parent 7facd49d33
commit 6401c850b7
@@ -119,11 +119,11 @@ domr.scripts.dir=scripts/network/domr/kvm
 # agent.hooks.libvirt_vm_xml_transformer.script=libvirt-vm-xml-transformer.groovy
 # agent.hooks.libvirt_vm_xml_transformer.method=transform
 #
-# The hook is called right after libvirt successfuly launched VM
+# The hook is called right after libvirt successfully launched VM
 # agent.hooks.libvirt_vm_on_start.script=libvirt-vm-state-change.groovy
 # agent.hooks.libvirt_vm_on_start.method=onStart
 #
-# The hook is called right after libvirt successfuly stopped VM
+# The hook is called right after libvirt successfully stopped VM
 # agent.hooks.libvirt_vm_on_stop.script=libvirt-vm-state-change.groovy
 # agent.hooks.libvirt_vm_on_stop.method=onStop
 #
@@ -513,7 +513,7 @@ public class AgentShell implements IAgentShell, Daemon {
                 while (!_exit)
                     Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] AgentShell was interupted.");
+                s_logger.debug("[ignored] AgentShell was interrupted.");
             }

         } catch (final ConfigurationException e) {
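Nearly all of the Java hunks in this commit touch the same idiom: a Thread.sleep() or wait() inside a polling loop, with InterruptedException caught, logged at debug level with an "[ignored]" prefix, and otherwise swallowed. Below is a minimal, self-contained sketch of that idiom, assuming a log4j 1.x logger as CloudStack uses; the class name and field are illustrative, not from the commit, and the commented-out line shows the stricter variant that re-asserts the interrupt flag:

import org.apache.log4j.Logger;

public class SleepLoopExample {
    private static final Logger s_logger = Logger.getLogger(SleepLoopExample.class);
    private volatile boolean _exit = false;

    public void run() {
        try {
            while (!_exit) {
                // Poll once a second until asked to exit, as AgentShell does above.
                Thread.sleep(1000);
            }
        } catch (InterruptedException e) {
            // Log-and-ignore, matching the corrected messages in this commit.
            s_logger.debug("[ignored] sleep loop was interrupted.");
            // Thread.currentThread().interrupt(); // stricter variant: restore the interrupt status
        }
    }
}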
@@ -607,7 +607,7 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG

     public Map<Long, DiskOffering> getDataDiskTemplateToDiskOfferingMap() {
         if (diskOfferingId != null && dataDiskTemplateToDiskOfferingList != null) {
-            throw new InvalidParameterValueException("diskofferingid paramter can't be specified along with datadisktemplatetodiskofferinglist parameter");
+            throw new InvalidParameterValueException("diskofferingid parameter can't be specified along with datadisktemplatetodiskofferinglist parameter");
         }
         if (MapUtils.isEmpty(dataDiskTemplateToDiskOfferingList)) {
             return new HashMap<Long, DiskOffering>();
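The DeployVMCmd hunk above sits inside a common API-validation idiom: two optional request parameters that must not be supplied together. A generic sketch of that check, with hypothetical names (only the exception-message pattern comes from the diff):

public class MutuallyExclusiveParams {
    // Throws if both values are present; mirrors the InvalidParameterValueException thrown in DeployVMCmd.
    static void requireAtMostOne(Object a, String nameA, Object b, String nameB) {
        if (a != null && b != null) {
            throw new IllegalArgumentException(nameA + " parameter can't be specified along with " + nameB + " parameter");
        }
    }

    public static void main(String[] args) {
        Long diskOfferingId = 42L;
        Object dataDiskMap = null; // leaving one of the pair unset passes the check
        requireAtMostOne(diskOfferingId, "diskofferingid", dataDiskMap, "datadisktemplatetodiskofferinglist");
    }
}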
@@ -102,7 +102,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
                     try {
                         Thread.sleep(delayMs);
                     } catch (InterruptedException ie) {
-                        s_logger.debug("[ignored] interupted while inserting security group rule log.");
+                        s_logger.debug("[ignored] interrupted while inserting security group rule log.");
                     }
                 } else
                     s_logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up");
@@ -275,7 +275,7 @@ public class StorageCacheManagerImpl implements StorageCacheManager, Manager {
                 try {
                     lock.wait(miliSeconds);
                 } catch (InterruptedException e) {
-                    s_logger.debug("[ignored] interupted while waiting for cache copy completion.");
+                    s_logger.debug("[ignored] interrupted while waiting for cache copy completion.");
                 }
                 s_logger.debug("waken up");
@@ -171,7 +171,7 @@ public class ClusterServiceServletContainer {
                     try {
                         Thread.sleep(1000);
                     } catch (InterruptedException e1) {
-                        s_logger.debug("[ignored] interupted while waiting to retry running the servlet container.");
+                        s_logger.debug("[ignored] interrupted while waiting to retry running the servlet container.");
                     }
                 }
             }
@@ -144,7 +144,7 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean {
                 }
                 Thread.sleep(5000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while aquiring " + key);
+                s_logger.debug("[ignored] interrupted while aquiring " + key);
             }
         }
         String msg = "Timed out on acquiring lock " + key + " . Waited for " + ((InaccurateClock.getTime() - startTime)/1000) + "seconds";
@@ -54,7 +54,7 @@ public class GlobalLockTest {
                 Thread.sleep(jobDuration * 1000);
             }
         } catch (InterruptedException e) {
-            s_logger.debug("[ignored] interupted while testing global lock.");
+            s_logger.debug("[ignored] interrupted while testing global lock.");
         } finally {
             if (locked) {
                 boolean unlocked = WorkLock.unlock();
@@ -326,7 +326,7 @@ public class MessageBusBase implements MessageBus {
                 try {
                     wait();
                 } catch (InterruptedException e) {
-                    s_logger.debug("[ignored] interupted while guarding re-entrance on message bus.");
+                    s_logger.debug("[ignored] interrupted while guarding re-entrance on message bus.");
                 }
             } else {
                 break;
@@ -41,7 +41,7 @@ public class MessageDetector implements MessageSubscriber {
             try {
                 wait(timeoutInMiliseconds);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while waiting on any message.");
+                s_logger.debug("[ignored] interrupted while waiting on any message.");
             }
         }
     }
@@ -84,7 +84,7 @@ public class StaticStrategy implements BalanceStrategy {
                 try {
                     Thread.sleep(250);
                 } catch (InterruptedException e) {
-                    s_logger.debug("[ignored] interupted while fail over in progres.");
+                    s_logger.debug("[ignored] interrupted while fail over in progres.");
                 }

                 // start fresh
@@ -303,7 +303,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
                 try {
                     TimeUnit.SECONDS.sleep(1);
                 } catch (InterruptedException e) {
-                    s_logger.debug("[ignored] interupted while waiting to retry running script.");
+                    s_logger.debug("[ignored] interrupted while waiting to retry running script.");
                 }
                 continue;
             } else if (res == null) {
@@ -2371,7 +2371,7 @@ public class HypervDirectConnectResource extends ServerResourceBase implements S
                 try {
                     Thread.sleep(5000);
                 } catch (final InterruptedException ex) {
-                    s_logger.debug("[ignored] interupted while waiting to retry connecting to vm after exception: "+e.getLocalizedMessage());
+                    s_logger.debug("[ignored] interrupted while waiting to retry connecting to vm after exception: "+e.getLocalizedMessage());
                 }
             }
         }
@@ -2379,7 +2379,7 @@ public class HypervDirectConnectResource extends ServerResourceBase implements S
             try {
                 Thread.sleep(1000);
             } catch (final InterruptedException ex) {
-                s_logger.debug("[ignored] interupted while connecting to vm.");
+                s_logger.debug("[ignored] interrupted while connecting to vm.");
             }
         }
@@ -104,7 +104,7 @@ public final class LibvirtBackupSnapshotCommandWrapper extends CommandWrapper<Ba
                 r.confSet("key", primaryPool.getAuthSecret());
                 r.confSet("client_mount_timeout", "30");
                 r.connect();
-                s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
+                s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host"));

                 final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir());
                 final Rbd rbd = new Rbd(io);
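Several hunks in this commit touch the same rados-java connection boilerplate. A hedged sketch of that sequence, assembled from the calls visible in the diff; the user name, monitor address, key, and pool name are placeholder values standing in for the pool accessors, and ioCtxDestroy() is assumed from rados-java rather than shown in this diff:

import com.ceph.rados.IoCTX;
import com.ceph.rados.Rados;
import com.ceph.rbd.Rbd;

public class CephConnectSketch {
    public static void main(String[] args) throws Exception {
        Rados r = new Rados("admin");                 // cephx user, as primaryPool.getAuthUserName() supplies above
        r.confSet("mon_host", "10.0.0.1:6789");       // monitor host:port, as in the rDest.confSet("mon_host", ...) hunk
        r.confSet("key", "<cephx-secret>");           // as primaryPool.getAuthSecret() supplies above
        r.confSet("client_mount_timeout", "30");
        r.connect();
        System.out.println("Successfully connected to Ceph cluster at " + r.confGet("mon_host"));

        IoCTX io = r.ioCtxCreate("cloudstack");       // pool name, as primaryPool.getSourceDir() supplies above
        Rbd rbd = new Rbd(io);                        // RBD handle for the image operations seen later in the diff
        // ... clone, snapRemove, open, etc. ...
        r.ioCtxDestroy(io);                           // assumed cleanup call, not shown in this diff
    }
}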
@@ -121,7 +121,7 @@ public final class LibvirtManageSnapshotCommandWrapper extends CommandWrapper<Ma
                 r.confSet("key", primaryPool.getAuthSecret());
                 r.confSet("client_mount_timeout", "30");
                 r.connect();
-                s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
+                s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host"));

                 final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir());
                 final Rbd rbd = new Rbd(io);
@@ -159,7 +159,7 @@ public class IscsiStorageCleanupMonitor implements Runnable{
                 try {
                     Thread.sleep(CLEANUP_INTERVAL_SEC * 1000);
                 } catch (InterruptedException e) {
-                    s_logger.debug("[ignored] interupted between heartbeats.");
+                    s_logger.debug("[ignored] interrupted between heartbeats.");
                 }

         Thread monitorThread = new Thread(new Monitor());
@@ -167,7 +167,7 @@ public class IscsiStorageCleanupMonitor implements Runnable{
         try {
             monitorThread.join();
         } catch (InterruptedException e) {
-            s_logger.debug("[ignored] interupted joining monitor.");
+            s_logger.debug("[ignored] interrupted joining monitor.");
         }
     }
 }
@@ -325,7 +325,7 @@ public class KVMStoragePoolManager {
             try {
                 Thread.sleep(3000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while trying to get storage pool.");
+                s_logger.debug("[ignored] interrupted while trying to get storage pool.");
             }
             cnt++;
         }
@@ -1604,7 +1604,7 @@ public class KVMStorageProcessor implements StorageProcessor {
         r.confSet(CEPH_AUTH_KEY, primaryPool.getAuthSecret());
         r.confSet(CEPH_CLIENT_MOUNT_TIMEOUT, CEPH_DEFAULT_MOUNT_TIMEOUT);
         r.connect();
-        s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet(CEPH_MON_HOST));
+        s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet(CEPH_MON_HOST));
         return r;
     }
@@ -509,7 +509,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
         pool.setUsed(storage.getInfo().allocation);
         pool.setAvailable(storage.getInfo().available);

-        s_logger.debug("Succesfully refreshed pool " + uuid +
+        s_logger.debug("Successfully refreshed pool " + uuid +
                 " Capacity: " + toHumanReadableSize(storage.getInfo().capacity) +
                 " Used: " + toHumanReadableSize(storage.getInfo().allocation) +
                 " Available: " + toHumanReadableSize(storage.getInfo().available));
@@ -721,7 +721,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
                 s.free();
             }

-            s_logger.info("Storage pool " + uuid + " was succesfully removed from libvirt.");
+            s_logger.info("Storage pool " + uuid + " was successfully removed from libvirt.");

             return true;
         } catch (LibvirtException e) {
@@ -908,7 +908,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
             r.confSet("key", pool.getAuthSecret());
             r.confSet("client_mount_timeout", "30");
             r.connect();
-            s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
+            s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host"));

             IoCTX io = r.ioCtxCreate(pool.getSourceDir());
             Rbd rbd = new Rbd(io);
@@ -926,7 +926,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
                     s_logger.debug("Removing snapshot " + pool.getSourceDir() + "/" + uuid + "@" + snap.name);
                     image.snapRemove(snap.name);
                 }
-                s_logger.info("Succesfully unprotected and removed any remaining snapshots (" + snaps.size() + ") of "
+                s_logger.info("Successfully unprotected and removed any remaining snapshots (" + snaps.size() + ") of "
                         + pool.getSourceDir() + "/" + uuid + " Continuing to remove the RBD image");
             } catch (RbdException e) {
                 s_logger.error("Failed to remove snapshot with exception: " + e.toString() +
@@ -1105,7 +1105,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
                 r.confSet("key", srcPool.getAuthSecret());
                 r.confSet("client_mount_timeout", "30");
                 r.connect();
-                s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
+                s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host"));

                 IoCTX io = r.ioCtxCreate(srcPool.getSourceDir());
                 Rbd rbd = new Rbd(io);
@@ -1154,7 +1154,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
                 }

                 rbd.clone(template.getName(), rbdTemplateSnapName, io, disk.getName(), RBD_FEATURES, rbdOrder);
-                s_logger.debug("Succesfully cloned " + template.getName() + "@" + rbdTemplateSnapName + " to " + disk.getName());
+                s_logger.debug("Successfully cloned " + template.getName() + "@" + rbdTemplateSnapName + " to " + disk.getName());
                 /* We also need to resize the image if the VM was deployed with a larger root disk size */
                 if (disk.getVirtualSize() > template.getVirtualSize()) {
                     RbdImage diskImage = rbd.open(disk.getName());
@@ -1176,14 +1176,14 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
             rSrc.confSet("key", srcPool.getAuthSecret());
             rSrc.confSet("client_mount_timeout", "30");
             rSrc.connect();
-            s_logger.debug("Succesfully connected to source Ceph cluster at " + rSrc.confGet("mon_host"));
+            s_logger.debug("Successfully connected to source Ceph cluster at " + rSrc.confGet("mon_host"));

             Rados rDest = new Rados(destPool.getAuthUserName());
             rDest.confSet("mon_host", destPool.getSourceHost() + ":" + destPool.getSourcePort());
             rDest.confSet("key", destPool.getAuthSecret());
             rDest.confSet("client_mount_timeout", "30");
             rDest.connect();
-            s_logger.debug("Succesfully connected to source Ceph cluster at " + rDest.confGet("mon_host"));
+            s_logger.debug("Successfully connected to source Ceph cluster at " + rDest.confGet("mon_host"));

             IoCTX sIO = rSrc.ioCtxCreate(srcPool.getSourceDir());
             Rbd sRbd = new Rbd(sIO);
@@ -1343,7 +1343,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {

             s_logger.debug("Starting copy from source image " + srcFile.getFileName() + " to RBD image " + rbdDestPath);
             qemu.convert(srcFile, destFile);
-            s_logger.debug("Succesfully converted source image " + srcFile.getFileName() + " to RBD image " + rbdDestPath);
+            s_logger.debug("Successfully converted source image " + srcFile.getFileName() + " to RBD image " + rbdDestPath);

             /* We have to stat the RBD image to see how big it became afterwards */
             Rados r = new Rados(destPool.getAuthUserName());
|
||||
r.confSet("key", destPool.getAuthSecret());
|
||||
r.confSet("client_mount_timeout", "30");
|
||||
r.connect();
|
||||
s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
|
||||
s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host"));
|
||||
|
||||
IoCTX io = r.ioCtxCreate(destPool.getSourceDir());
|
||||
Rbd rbd = new Rbd(io);
|
||||
|
||||
@@ -271,7 +271,7 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {

             LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath());
             qemu.convert(srcFile, destFile, true);
-            LOGGER.debug("Succesfully converted source disk image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath());
+            LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath());
         } catch (QemuImgException | LibvirtException e) {
             try {
                 Map<String, String> srcInfo = qemu.info(srcFile);
@@ -373,7 +373,7 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
                 LOGGER.debug("Starting copy from source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath());
                 QemuImg qemu = new QemuImg(timeout);
                 qemu.convert(srcFile, destFile);
-                LOGGER.debug("Succesfully converted source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath());
+                LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath());
             } catch (QemuImgException | LibvirtException e) {
                 LOGGER.error("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage(), e);
                 destDisk = null;
@@ -1158,7 +1158,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 Thread.currentThread();
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while trying to get mac.");
+                s_logger.debug("[ignored] interrupted while trying to get mac.");
             }
         }
@@ -6496,7 +6496,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 try {
                     Thread.sleep(5000);
                 } catch (InterruptedException ex) {
-                    s_logger.debug("[ignored] interupted while waiting to retry connect after failure.", e);
+                    s_logger.debug("[ignored] interrupted while waiting to retry connect after failure.", e);
                 }
             }
         }
@@ -6504,7 +6504,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException ex) {
-                s_logger.debug("[ignored] interupted while waiting to retry connect.");
+                s_logger.debug("[ignored] interrupted while waiting to retry connect.");
             }
         }
@@ -853,7 +853,7 @@ public class XenServerStorageProcessor implements StorageProcessor {

             VDI.Record vdir;
             vdir = vdi.getRecord(conn);
-            s_logger.debug("Succesfully created VDI: Uuid = " + vdir.uuid);
+            s_logger.debug("Successfully created VDI: Uuid = " + vdir.uuid);

             final VolumeObjectTO newVol = new VolumeObjectTO();
             newVol.setName(vdir.nameLabel);
@@ -72,7 +72,7 @@ public final class CitrixCreateCommandWrapper extends CommandWrapper<CreateComma
             VDI.Record vdir;
             vdir = vdi.getRecord(conn);

-            s_logger.debug("Succesfully created VDI for " + command + ". Uuid = " + vdir.uuid);
+            s_logger.debug("Successfully created VDI for " + command + ". Uuid = " + vdir.uuid);

             final VolumeTO vol =
                     new VolumeTO(command.getVolumeId(), dskch.getType(), pool.getType(), pool.getUuid(), vdir.nameLabel, pool.getPath(), vdir.uuid, vdir.virtualSize, null);
@@ -961,7 +961,7 @@ public class NetscalerResource implements ServerResource {
                 try {
                     Thread.sleep(10000);
                 } catch (final InterruptedException e) {
-                    s_logger.debug("[ignored] interupted while waiting for netscaler to be 'up'.");
+                    s_logger.debug("[ignored] interrupted while waiting for netscaler to be 'up'.");
                 }
                 final ns refreshNsObj = new ns();
                 refreshNsObj.set_id(newVpx.get_id());
@@ -964,7 +964,7 @@ public class ElastistorUtil {
         public List<Pair<String, String>> getCommandParameters();

         /*
-         * Adds new key-value pair to the query paramters lists.
+         * Adds new key-value pair to the query parameters lists.
         */
         public void putCommandParameter(String key, String value);
@@ -206,15 +206,15 @@ done

 #install_cloud_agent $dflag
 #install_cloud_consoleP $dflag
-paramters=
+parameters=
 if [ -n "$pubNic" ]
 then
-    paramters=" --pubNic=$pubNic"
+    parameters=" --pubNic=$pubNic"
 fi

 if [ -n "$prvNic" ]
 then
-    paramters=" --prvNic=$prvNic $paramters"
+    parameters=" --prvNic=$prvNic $parameters"
 fi

 selenabled=`cat /selinux/enforce`
@@ -224,5 +224,5 @@ then
 setenforce 0
 fi

-cloudstack-setup-agent --host=$host --zone=$zone --pod=$pod --cluster=$cluster --guid=$guid $paramters -a > /dev/null
+cloudstack-setup-agent --host=$host --zone=$zone --pod=$pod --cluster=$cluster --guid=$guid $parameters -a > /dev/null
 #cloud_consoleP_setup $host $zone $pod
@@ -1250,7 +1250,7 @@ public class ApiDBUtils {
         // Currently, KVM only supports RBD and PowerFlex images of type RAW.
         // This results in a weird collision with OVM volumes which
         // can only be raw, thus making KVM RBD volumes show up as OVM
-        // rather than RBD. This block of code can (hopefuly) by checking to
+        // rather than RBD. This block of code can (hopefully) by checking to
         // see if the pool is using either RBD or NFS. However, it isn't
         // quite clear what to do if both storage types are used. If the image
         // format is RAW, it narrows the hypervisor choice down to OVM and KVM / RBD or KVM / CLVM
@@ -239,7 +239,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager {
         assignRouterNicsToNewNetwork(network.getId(), networkCopyId);

         if (s_logger.isDebugEnabled()) {
-            s_logger.debug("Succesfully created a copy of network " + originalNetwork.getName() + "(" + originalNetwork.getUuid() + ") id is " + originalNetwork.getId() + " for migration. The network copy has uuid " + network.getUuid() + " and id " + network.getId());
+            s_logger.debug("Successfully created a copy of network " + originalNetwork.getName() + "(" + originalNetwork.getUuid() + ") id is " + originalNetwork.getId() + " for migration. The network copy has uuid " + network.getUuid() + " and id " + network.getId());
         }
         return networkCopyId;
     }
@@ -311,7 +311,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager {
             reassignGatewayToNewVpc(vpcId, copyOfVpcId);
             copyVpcResourceTagsToNewVpc(vpcId, copyOfVpcId);
             if (s_logger.isDebugEnabled()) {
-                s_logger.debug("Succesfully created a copy of network " + vpc.getName() + "(" + vpc.getUuid() + ") id is " + vpc.getId() + " for migration. The network copy has uuid " + copyVpcVO.getUuid() + " and id " + copyOfVpc.getId());
+                s_logger.debug("Successfully created a copy of network " + vpc.getName() + "(" + vpc.getUuid() + ") id is " + vpc.getId() + " for migration. The network copy has uuid " + copyVpcVO.getUuid() + " and id " + copyOfVpc.getId());
             }
         } catch (ResourceAllocationException e) {
             throw new CloudRuntimeException(e.getMessage());
@@ -175,7 +175,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian
                 s_logger.warn("Failed to remove the router " + router + " from network " + network + " as a part of cleanup");
             }
         } else {
-            s_logger.debug("Succesfully added router " + router + " to guest network " + network);
+            s_logger.debug("Successfully added router " + router + " to guest network " + network);
         }
     }
@@ -1528,7 +1528,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
                 _ipAddrMgr.handleSystemIpRelease(ip);
                 throw new CloudRuntimeException("Failed to enable static nat on system ip for the vm " + vm);
             } else {
-                s_logger.warn("Succesfully enabled static nat on system ip " + ip + " for the vm " + vm);
+                s_logger.warn("Successfully enabled static nat on system ip " + ip + " for the vm " + vm);
             }
         }
     }
@@ -1141,7 +1141,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
         // update the instance with removed flag only when the cleanup is
         // executed successfully
         if (_vpcDao.remove(vpc.getId())) {
-            s_logger.debug("Vpc " + vpc + " is destroyed succesfully");
+            s_logger.debug("Vpc " + vpc + " is destroyed successfully");
             return true;
         } else {
             s_logger.warn("Vpc " + vpc + " failed to destroy");
@@ -1433,7 +1433,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
         for (final VpcProvider element : getVpcElements()) {
             if (providersToImplement.contains(element.getProvider())) {
                 if (element.shutdownVpc(vpc, context)) {
-                    s_logger.debug("Vpc " + vpc + " has been shutdown succesfully");
+                    s_logger.debug("Vpc " + vpc + " has been shutdown successfully");
                 } else {
                     s_logger.warn("Vpc " + vpc + " failed to shutdown");
                     success = false;
@@ -1961,7 +1961,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
             }
         }
         if (success) {
-            s_logger.debug("Private gateway " + gateway + " was applied succesfully on the backend");
+            s_logger.debug("Private gateway " + gateway + " was applied successfully on the backend");
             if (vo.getState() != VpcGateway.State.Ready) {
                 vo.setState(VpcGateway.State.Ready);
                 _vpcGatewayDao.update(vo.getId(), vo);
@@ -2029,7 +2029,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
         for (final VpcProvider provider : getVpcElements()) {
             if (providersToImplement.contains(provider.getProvider())) {
                 if (provider.deletePrivateGateway(gateway)) {
-                    s_logger.debug("Private gateway " + gateway + " was applied succesfully on the backend");
+                    s_logger.debug("Private gateway " + gateway + " was applied successfully on the backend");
                 } else {
                     s_logger.warn("Private gateway " + gateway + " failed to apply on the backend");
                     gatewayVO.setState(VpcGateway.State.Ready);
@@ -256,7 +256,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
             try {
                 Thread.sleep(_pauseInterval * 1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while retry cmd.");
+                s_logger.debug("[ignored] interrupted while retry cmd.");
             }

             s_logger.debug("Retrying...");
@@ -61,7 +61,7 @@ public class SystemVmLoadScanner<T> {
         try {
             _capacityScanScheduler.awaitTermination(1000, TimeUnit.MILLISECONDS);
         } catch (InterruptedException e) {
-            s_logger.debug("[ignored] interupted while stopping systemvm load scanner.");
+            s_logger.debug("[ignored] interrupted while stopping systemvm load scanner.");
         }

         _capacityScanLock.releaseRef();
@@ -1127,7 +1127,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
             if (template == null) {
                 template = createDefaultDummyVmImportTemplate();
                 if (template == null) {
-                    throw new InvalidParameterValueException(String.format("Default VM import template with unique name: %s for hypervisor: %s cannot be created. Please use templateid paramter for import", VM_IMPORT_DEFAULT_TEMPLATE_NAME, cluster.getHypervisorType().toString()));
+                    throw new InvalidParameterValueException(String.format("Default VM import template with unique name: %s for hypervisor: %s cannot be created. Please use templateid parameter for import", VM_IMPORT_DEFAULT_TEMPLATE_NAME, cluster.getHypervisorType().toString()));
                 }
             }
         } else {
@@ -264,7 +264,7 @@ public class SyncLink implements Link {
             try {
                 Thread.sleep(100);
             } catch (InterruptedException e) {
-                s_logger.info("[ignored] interupted during pull", e);
+                s_logger.info("[ignored] interrupted during pull", e);
             }
         }
@@ -69,7 +69,7 @@ public class FakeSource extends BaseElement {
         try {
             Thread.sleep(delay);
         } catch (InterruptedException e) {
-            s_logger.info("[ignored] interupted while creating latency", e);
+            s_logger.info("[ignored] interrupted while creating latency", e);
         }
     }
@@ -230,7 +230,7 @@ public abstract class ConsoleProxyClientBase implements ConsoleProxyClient, Cons
             try {
                 Thread.sleep(100);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] Console proxy was interupted while waiting for viewer to become ready.");
+                s_logger.debug("[ignored] Console proxy was interrupted while waiting for viewer to become ready.");
             }
         }
         return false;
@@ -343,7 +343,7 @@ public abstract class ConsoleProxyClientBase implements ConsoleProxyClient, Cons
                 try {
                     tileDirtyEvent.wait(3000);
                 } catch (InterruptedException e) {
-                    s_logger.debug("[ignored] Console proxy ajax update was interupted while waiting for viewer to become ready.");
+                    s_logger.debug("[ignored] Console proxy ajax update was interrupted while waiting for viewer to become ready.");
                 }
             }
         }
@@ -111,7 +111,7 @@ public class ConsoleProxyGCThread extends Thread {
             try {
                 Thread.sleep(5000);
             } catch (InterruptedException ex) {
-                s_logger.debug("[ignored] Console proxy was interupted during GC.");
+                s_logger.debug("[ignored] Console proxy was interrupted during GC.");
             }
         }
     }
@@ -494,7 +494,7 @@ class TestSystemVmLocalStorage(cloudstackTestCase):
             name = 'secstorage.service.offering'
         else:
             raise Exception(
-                "type paramter is not correct it should be system vm "
+                "type parameter is not correct it should be system vm "
                 "type{console proxy,secsroragevm}")

         # 3-update global settings with system offering uuid
@@ -562,7 +562,7 @@ class TestSnapshotList(cloudstackTestCase):
             True,
             "Account access check failed!!")

-        ## Domain Admin - Test cases without passing listall paramter
+        ## Domain Admin - Test cases without passing listall parameter

     @attr("simulator_only", tags=["advanced"], required_hardware="false")
     def test_listSnapshot_as_domainadmin(self):
|
||||
True,
|
||||
"Account access check failed!!")
|
||||
|
||||
## ROOT Admin - Test cases without passing listall paramter
|
||||
## ROOT Admin - Test cases without passing listall parameter
|
||||
|
||||
@attr("simulator_only", tags=["advanced"], required_hardware="false")
|
||||
def test_listSnapshot_as_rootadmin(self):
|
||||
@ -2022,7 +2022,7 @@ class TestSnapshotList(cloudstackTestCase):
|
||||
True,
|
||||
"Account access check failed!!")
|
||||
|
||||
## Regular User - Test cases without passing listall paramter
|
||||
## Regular User - Test cases without passing listall parameter
|
||||
|
||||
@attr("simulator_only", tags=["advanced"], required_hardware="false")
|
||||
def test_listSnapshot_as_regularuser(self):
|
||||
|
||||
@@ -535,7 +535,7 @@ class TestVMList(cloudstackTestCase):
             True,
             "Account access check failed!!")

-        ## Domain Admin - Test cases without passing listall paramter
+        ## Domain Admin - Test cases without passing listall parameter

     @attr("simulator_only", tags=["advanced"], required_hardware="false")
     def test_listVM_as_domainadmin(self):
@@ -1281,7 +1281,7 @@ class TestVMList(cloudstackTestCase):
             True,
             "Account access check failed!!")

-        ## ROOT Admin - Test cases without passing listall paramter
+        ## ROOT Admin - Test cases without passing listall parameter

     @attr("simulator_only", tags=["advanced"], required_hardware="false")
     def test_listVM_as_rootadmin(self):
@@ -1996,7 +1996,7 @@ class TestVMList(cloudstackTestCase):
             True,
             "Account access check failed!!")

-        ## Regular User - Test cases without passing listall paramter
+        ## Regular User - Test cases without passing listall parameter

     @attr("simulator_only", tags=["advanced"], required_hardware="false")
     def test_listVM_as_regularuser(self):
@@ -548,7 +548,7 @@ class TestVolumeList(cloudstackTestCase):
             True,
             "Account access check failed!!")

-        ## Domain Admin - Test cases without passing listall paramter
+        ## Domain Admin - Test cases without passing listall parameter

     @attr("simulator_only", tags=["advanced"], required_hardware="false")
     def test_listVolume_as_domainadmin(self):
@@ -1290,7 +1290,7 @@ class TestVolumeList(cloudstackTestCase):
             True,
             "Account access check failed!!")

-        ## ROOT Admin - Test cases without passing listall paramter
+        ## ROOT Admin - Test cases without passing listall parameter

     @attr("simulator_only", tags=["advanced"], required_hardware="false")
     def test_listVolume_as_rootadmin(self):
@@ -2004,7 +2004,7 @@ class TestVolumeList(cloudstackTestCase):
             True,
             "Account access check failed!!")

-        ## Regular User - Test cases without passing listall paramter
+        ## Regular User - Test cases without passing listall parameter

     @attr("simulator_only", tags=["advanced"], required_hardware="false")
     def test_listVolume_as_regularuser(self):
@@ -940,7 +940,7 @@ class TestTemplates(cloudstackTestCase):
         # 2. Perform snapshot on the root disk of this VM.
         # 3. Create a template from snapshot.
         # 4. Delete the template and create a new template with same name
-        # 5. Template should be created succesfully
+        # 5. Template should be created successfully

         # Create a snapshot from the ROOTDISK
         if self.hypervisor.lower() in ['hyperv']:
@@ -858,7 +858,7 @@ class TestHAProxyStickyness(cloudstackTestCase):
         # * by passing the Invlaid parameter
         # * Invalid method name
         # * required parameter not present
-        # * passing invalid values to valid paramters.
+        # * passing invalid values to valid parameters.

         self.debug("Creating LB rule for account: %s" % self.account.name)
         lb_rule = self.create_LB_Rule(
@@ -851,7 +851,7 @@ class TestResizeVolume(cloudstackTestCase):
         except Exception as e:
             raise Exception("Warning: Exception during"
                             " VM deployment with new"
-                            " rootdisk paramter : %s" % e)
+                            " rootdisk parameter : %s" % e)


     @attr(tags=["advanced"], required_hardware="true")
@@ -604,7 +604,7 @@ class TestVPCNetworkPFRules(cloudstackTestCase):
         # 7. Use the Create PF rule ssh and http for vm1 and vm2 in network1.
         # 8. Use the Create PF rule ssh and http for vm3 and vm4 in network2.
         # 9. Successfully ssh into the Guest vm1, vm2, vm3 and vm4 using the PF rule.
-        # 10. Succesfully wget a file from http server present on vm1, vm2, vm3 and vm4.
+        # 10. Successfully wget a file from http server present on vm1, vm2, vm3 and vm4.
         # 11. Stop VPC Virtual Router.
         # 12. Delete all PF rultes for vm1, vm2, vm3 and vm4.
         # 12. Start VPC Virtual Router.
@@ -684,7 +684,7 @@ class TestVPCNetworkPFRules(cloudstackTestCase):
         # 7. Use the Create PF rule ssh and http for vm1 and vm2 in network1.
         # 8. Use the Create PF rule ssh and http for vm3 and vm4 in network2.
         # 9. Successfully ssh into the Guest vm1, vm2, vm3 and vm4 using the PF rule.
-        # 10. Succesfully wget a file from http server present on vm1, vm2, vm3 and vm4.
+        # 10. Successfully wget a file from http server present on vm1, vm2, vm3 and vm4.
         # 12. Delete all PF rultes for vm1, vm2, vm3 and vm4.
         # 13. Fail to ssh and http to vm1, vm2, vm3 and vm4.
         """
@@ -549,7 +549,7 @@ class TestVPCNetworkPFRules(cloudstackTestCase):
         # 7. Use the Create PF rule ssh and http for vm1 and vm2 in network1.
         # 8. Use the Create PF rule ssh and http for vm3 and vm4 in network2.
         # 9. Successfully ssh into the Guest vm1, vm2, vm3 and vm4 using the PF rule.
-        # 10. Succesfully wget a file from http server present on vm1, vm2, vm3 and vm4.
+        # 10. Successfully wget a file from http server present on vm1, vm2, vm3 and vm4.
         # 12. Delete all PF rultes for vm1, vm2, vm3 and vm4.
         # 13. Fail to ssh and http to vm1, vm2, vm3 and vm4.
         """
@@ -1542,7 +1542,7 @@ class TestUserLogin(cloudstackTestCase):
             parentdomainid=self.domain.id
         )
         self.cleanup.append(domain)
-        self.debug("Domain: %s is created succesfully." % domain.name)
+        self.debug("Domain: %s is created successfully." % domain.name)
         self.debug(
             "Checking if the created domain is listed in list domains API")
         domains = Domain.list(self.apiclient, id=domain.id, listall=True)
|
||||
parentdomainid=self.domain.id
|
||||
)
|
||||
self.cleanup.append(self.child_domain)
|
||||
self.debug("Domain is created succesfully.")
|
||||
self.debug("Domain is created successfully.")
|
||||
self.debug(
|
||||
"Checking if the created domain is listed in list domains API")
|
||||
domains = Domain.list(self.apiclient, id=self.child_domain.id, listall=True)
|
||||
|
||||
@@ -448,7 +448,7 @@ class TestSnapshotsHardning(cloudstackTestCase):
         """snapshot hardning
         1. Take VM snapshot then migrate the VM to another host
            and again take volume snapshot and check its integrity
-        2. Verify that snapshot gets created successfuly while VM
+        2. Verify that snapshot gets created successfully while VM
            is getting Migrated to another host
         3. Verify that snapshot should succeed after vm's are HA-ed
            to different host and also check its integrity
@@ -860,7 +860,7 @@ class TestSnapshotsHardning(cloudstackTestCase):
         1. Take VM snapshot then migrate the VM to another
            host and again take
            volume snapshot and check its intigrity
-        2. Verify that snapshot gets created successfuly
+        2. Verify that snapshot gets created successfully
            while VM is getting
            Migrated to another host
         3. Verify that snapshot should succeed after vm's are
@@ -471,7 +471,7 @@ class TestPathVolume(cloudstackTestCase):
                 self.cluster_volume.id), "volume does not exist %s" %
             self.cluster_volume.id)
         self.debug(
-            "volume id %s got created successfuly" %
+            "volume id %s got created successfully" %
             list_cluster_volume[0].id)
         self.virtual_machine_zone_4.attach_volume(self.userapiclient,
                                                   self.cluster_volume
@@ -634,7 +634,7 @@ class TestPathVolume(cloudstackTestCase):
             self.template_from_vm1_root_disk.delete(self.userapiclient
                                                     )
             self.debug(
-                "Template id: %s got deleted successfuly" %
+                "Template id: %s got deleted successfully" %
                 self.template_from_vm1_root_disk.id)
         except Exception as e:
             raise Exception("Template deletion failed with error %s" % e)
@@ -690,7 +690,7 @@ class TestPathVolume(cloudstackTestCase):
             list_volume[0].id, str(
                 self.migrate_volume.id), "volume does not exist %s" %
             self.migrate_volume.id)
-        self.debug("volume id %s got created successfuly" % list_volume[0].id)
+        self.debug("volume id %s got created successfully" % list_volume[0].id)

         self.virtual_machine_1.attach_volume(self.userapiclient,
                                              self.migrate_volume
@@ -720,7 +720,7 @@ class TestPathVolume(cloudstackTestCase):
             list_volume[0].id, str(
                 self.migrate_volume.id), "volume does not exist %s" %
             self.migrate_volume.id)
-        self.debug("volume id %s got created successfuly" % list_volume[0].id)
+        self.debug("volume id %s got created successfully" % list_volume[0].id)
         list_pool = StoragePool.list(self.apiclient,
                                      id=list_volume[0].storageid
                                      )
@@ -802,7 +802,7 @@ public class ApiCommand {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while during async job result query.");
+                s_logger.debug("[ignored] interrupted while during async job result query.");
             }
         } else {
             break;
@@ -1337,7 +1337,7 @@ public class StressTestDirectAttach {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while during async job result query.");
+                s_logger.debug("[ignored] interrupted while during async job result query.");
             }
         } else {
             break;
@@ -2273,7 +2273,7 @@ public class TestClientWithAPI {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while during async job result query.");
+                s_logger.debug("[ignored] interrupted while during async job result query.");
             }
         } else {
             break;
@@ -67,7 +67,7 @@ public class ConsoleProxy implements Runnable {
         try {
             Thread.sleep(1000);
         } catch (InterruptedException e) {
-            s_logger.debug("[ignored] interupted.");
+            s_logger.debug("[ignored] interrupted.");
         }

     }
tools/ngui/static/js/lib/angular.js (vendored)
@@ -3339,7 +3339,7 @@ function Browser(window, document, $log, $sniffer) {
    * Cancels a defered task identified with `deferId`.
    *
    * @param {*} deferId Token returned by the `$browser.defer` function.
-   * @returns {boolean} Returns `true` if the task hasn't executed yet and was successfuly canceled.
+   * @returns {boolean} Returns `true` if the task hasn't executed yet and was successfully canceled.
    */
   self.defer.cancel = function(deferId) {
     if (pendingDeferIds[deferId]) {
@@ -1759,7 +1759,7 @@ public class HypervisorHostHelper {
                 try {
                     Thread.sleep(1000);
                 } catch (InterruptedException e) {
-                    s_logger.debug("[ignored] interupted while waiting to config vm.");
+                    s_logger.debug("[ignored] interrupted while waiting to config vm.");
                 }
             }
         }
@@ -275,7 +275,7 @@ public class VirtualMachineMO extends BaseMO {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while dealing with vm questions.");
+                s_logger.debug("[ignored] interrupted while dealing with vm questions.");
             }
         }
         s_logger.info("VM Question monitor stopped");
@@ -544,7 +544,7 @@ public class VirtualMachineMO extends BaseMO {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while waiting for snapshot to be done.");
+                s_logger.debug("[ignored] interrupted while waiting for snapshot to be done.");
             }
         }
@@ -1651,7 +1651,7 @@ public class VirtualMachineMO extends BaseMO {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while handling vm question about iso detach.");
+                s_logger.debug("[ignored] interrupted while handling vm question about iso detach.");
             }
         }
         s_logger.info("VM Question monitor stopped");
@@ -3367,7 +3367,7 @@ public class VirtualMachineMO extends BaseMO {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                s_logger.debug("[ignored] interupted while handling vm question about umount tools install.");
+                s_logger.debug("[ignored] interrupted while handling vm question about umount tools install.");
             }
         }
@@ -681,7 +681,7 @@ public class VmwareContext {
             try {
                 Thread.sleep(CONNECT_RETRY_INTERVAL);
             } catch (InterruptedException ex) {
-                s_logger.debug("[ignored] interupted while connecting.");
+                s_logger.debug("[ignored] interrupted while connecting.");
             }
         }
     }