diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index 4dd044981da..23ca873bd0f 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -1014,13 +1014,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } else { - s_logger.debug("Agent " + hostId + " can't be transfered yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " + s_logger.debug("Agent " + hostId + " can't be transferred yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " + attache.getNonRecurringListenersSize()); } } } else { if (s_logger.isTraceEnabled()) { - s_logger.trace("Found no agents to be transfered by the management server " + _nodeId); + s_logger.trace("Found no agents to be transferred by the management server " + _nodeId); } } } @@ -1060,7 +1060,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (result) { - s_logger.debug("Successfully transfered host id=" + hostId + " to management server " + futureOwnerId); + s_logger.debug("Successfully transferred host id=" + hostId + " to management server " + futureOwnerId); finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted); } else { s_logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql b/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql index f1d2d7dd4e3..ebcdfb61c53 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql @@ -446,7 +446,7 @@ CREATE TABLE 
`cloud`.`external_load_balancer_devices` ( `is_dedicated` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if device/appliance is provisioned for dedicated use only', `is_inline` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer will be used in in-line configuration with firewall', `is_managed` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer appliance is provisioned and its life cycle is managed by by cloudstack', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external load balancer device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external load balancer device', `parent_host_id` bigint unsigned COMMENT 'if the load balancer appliance is cloudstack managed, then host id on which this appliance is provisioned', PRIMARY KEY (`id`), CONSTRAINT `fk_external_lb_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, @@ -463,7 +463,7 @@ CREATE TABLE `cloud`.`external_firewall_devices` ( `device_state` varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'state (enabled/disabled/shutdown) of the device', `is_dedicated` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if device/appliance meant for dedicated use only', `allocation_state` varchar(32) NOT NULL DEFAULT 'Free' COMMENT 'Allocation state (Free/Allocated) of the device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external firewall device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external firewall device', `capacity` bigint unsigned NOT NULL DEFAULT 0 COMMENT 'Capacity of the external firewall device', PRIMARY KEY (`id`), CONSTRAINT `fk_external_firewall_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, diff --git a/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql b/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql index ea344d7b65e..c0b3eb12a2f 100644 --- 
a/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql @@ -107,8 +107,8 @@ ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `shared_source_nat_service` i CREATE TABLE IF NOT EXISTS `cloud`.`op_host_transfer` ( `id` bigint unsigned UNIQUE NOT NULL COMMENT 'Id of the host', - `initial_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transfered from', - `future_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transfered to', + `initial_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transferred from', + `future_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transferred to', `state` varchar(32) NOT NULL COMMENT 'the transfer state of the host', `created` datetime NOT NULL COMMENT 'date created', PRIMARY KEY (`id`), diff --git a/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql b/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql index 832228cb434..e632fa679e5 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql @@ -211,7 +211,7 @@ CREATE TABLE `cloud`.`external_nicira_nvp_devices` ( `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which nicira nvp device is added', `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this nicira nvp device', `device_name` varchar(255) NOT NULL COMMENT 'name of the nicira nvp device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external nicira nvp device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external nicira nvp device', PRIMARY KEY (`id`), CONSTRAINT `fk_external_nicira_nvp_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, CONSTRAINT 
`fk_external_nicira_nvp_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE diff --git a/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql b/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql index 5624f0010e2..e5387853b3c 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql @@ -29,7 +29,7 @@ CREATE TABLE `cloud`.`external_nicira_nvp_devices` ( `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which nicira nvp device is added', `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this nicira nvp device', `device_name` varchar(255) NOT NULL COMMENT 'name of the nicira nvp device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external nicira nvp device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external nicira nvp device', PRIMARY KEY (`id`), CONSTRAINT `fk_external_nicira_nvp_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_external_nicira_nvp_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE @@ -165,7 +165,7 @@ CREATE TABLE `cloud`.`s3` ( `https` tinyint unsigned DEFAULT NULL COMMENT ' Flag indicating whether or not to connect over HTTPS', `connection_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) when initially establishing a connection before giving up and timing out.', `max_error_retry` integer COMMENT ' The maximum number of retry attempts for failed retryable requests (ex: 5xx error responses from services).', - `socket_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) for data to be transfered over an established, open connection before the connection times out and is 
closed.', + `socket_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) for data to be transferred over an established, open connection before the connection times out and is closed.', `created` datetime COMMENT 'date the s3 first signed on', PRIMARY KEY (`id`), CONSTRAINT `uc_s3__uuid` UNIQUE (`uuid`) @@ -304,7 +304,7 @@ CREATE TABLE `cloud`.`external_bigswitch_vns_devices` ( `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which bigswitch vns device is added', `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this bigswitch vns device', `device_name` varchar(255) NOT NULL COMMENT 'name of the bigswitch vns device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external bigswitch vns device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external bigswitch vns device', PRIMARY KEY (`id`), CONSTRAINT `fk_external_bigswitch_vns_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_external_bigswitch_vns_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE @@ -1536,7 +1536,7 @@ CREATE TABLE IF NOT EXISTS `cloud`.`baremetal_dhcp_devices`( `pod_id` bigint unsigned DEFAULT NULL COMMENT 'Pod id where this dhcp server in', `device_type` varchar(255) DEFAULT NULL COMMENT 'type of the external device', `physical_network_id` bigint unsigned DEFAULT NULL COMMENT 'id of the physical network in to which external dhcp device is added', - `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id coresponding to the external dhcp device', + `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id corresponding to the external dhcp device', PRIMARY KEY (`id`) )ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -1544,7 +1544,7 @@ ALTER TABLE `cloud`.`baremetal_dhcp_devices` CHANGE COLUMN `nsp_id` `nsp_id` big ALTER TABLE 
`cloud`.`baremetal_dhcp_devices` CHANGE COLUMN `pod_id` `pod_id` bigint unsigned DEFAULT NULL COMMENT 'Pod id where this dhcp server in'; ALTER TABLE `cloud`.`baremetal_dhcp_devices` CHANGE COLUMN `device_type` `device_type` varchar(255) DEFAULT NULL COMMENT 'type of the external device'; ALTER TABLE `cloud`.`baremetal_dhcp_devices` CHANGE COLUMN `physical_network_id` `physical_network_id` bigint unsigned DEFAULT NULL COMMENT 'id of the physical network in to which external dhcp device is added'; -ALTER TABLE `cloud`.`baremetal_dhcp_devices` CHANGE COLUMN `host_id` `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id coresponding to the external dhcp device'; +ALTER TABLE `cloud`.`baremetal_dhcp_devices` CHANGE COLUMN `host_id` `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id corresponding to the external dhcp device'; CREATE TABLE IF NOT EXISTS `cloud`.`baremetal_pxe_devices` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', @@ -1553,7 +1553,7 @@ CREATE TABLE IF NOT EXISTS `cloud`.`baremetal_pxe_devices` ( `pod_id` bigint unsigned DEFAULT NULL COMMENT 'Pod id where this pxe server in, for pxe per zone this field is null', `device_type` varchar(255) DEFAULT NULL COMMENT 'type of the pxe device', `physical_network_id` bigint unsigned DEFAULT NULL COMMENT 'id of the physical network in to which external pxe device is added', - `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id coresponding to the external pxe device', + `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id corresponding to the external pxe device', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -1561,7 +1561,7 @@ ALTER TABLE `cloud`.`baremetal_pxe_devices` CHANGE COLUMN `nsp_id` `nsp_id` bigi ALTER TABLE `cloud`.`baremetal_pxe_devices` CHANGE COLUMN `pod_id` `pod_id` bigint unsigned DEFAULT NULL COMMENT 'Pod id where this pxe server in, for pxe per zone this field is null'; ALTER TABLE `cloud`.`baremetal_pxe_devices` CHANGE COLUMN `device_type` `device_type` 
varchar(255) DEFAULT NULL COMMENT 'type of the pxe device'; ALTER TABLE `cloud`.`baremetal_pxe_devices` CHANGE COLUMN `physical_network_id` `physical_network_id` bigint unsigned DEFAULT NULL COMMENT 'id of the physical network in to which external pxe device is added'; -ALTER TABLE `cloud`.`baremetal_pxe_devices` CHANGE COLUMN `host_id` `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id coresponding to the external pxe device'; +ALTER TABLE `cloud`.`baremetal_pxe_devices` CHANGE COLUMN `host_id` `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id corresponding to the external pxe device'; #drop tables as the feature is not a part of 4.2 DROP TABLE IF EXISTS `cloud`.`host_updates`; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-40to410.sql b/engine/schema/src/main/resources/META-INF/db/schema-40to410.sql index 53b4a1a5b8a..3d6dc654256 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-40to410.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-40to410.sql @@ -116,7 +116,7 @@ CREATE TABLE `cloud`.`s3` ( `https` tinyint unsigned DEFAULT NULL COMMENT ' Flag indicating whether or not to connect over HTTPS', `connection_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) when initially establishing a connection before giving up and timing out.', `max_error_retry` integer COMMENT ' The maximum number of retry attempts for failed retryable requests (ex: 5xx error responses from services).', - `socket_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) for data to be transfered over an established, open connection before the connection times out and is closed.', + `socket_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) for data to be transferred over an established, open connection before the connection times out and is closed.', `created` datetime COMMENT 'date the s3 first signed on', PRIMARY KEY (`id`), CONSTRAINT `uc_s3__uuid` UNIQUE (`uuid`) @@ -278,7 
+278,7 @@ CREATE TABLE `cloud`.`external_bigswitch_vns_devices` ( `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which bigswitch vns device is added', `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this bigswitch vns device', `device_name` varchar(255) NOT NULL COMMENT 'name of the bigswitch vns device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external bigswitch vns device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external bigswitch vns device', PRIMARY KEY (`id`), CONSTRAINT `fk_external_bigswitch_vns_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_external_bigswitch_vns_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE @@ -1603,7 +1603,7 @@ CREATE TABLE `cloud`.`baremetal_dhcp_devices` ( `pod_id` bigint unsigned DEFAULT NULL COMMENT 'Pod id where this dhcp server in', `device_type` varchar(255) DEFAULT NULL COMMENT 'type of the external device', `physical_network_id` bigint unsigned DEFAULT NULL COMMENT 'id of the physical network in to which external dhcp device is added', - `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id coresponding to the external dhcp device', + `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id corresponding to the external dhcp device', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -1614,7 +1614,7 @@ CREATE TABLE `cloud`.`baremetal_pxe_devices` ( `pod_id` bigint unsigned DEFAULT NULL COMMENT 'Pod id where this pxe server in, for pxe per zone this field is null', `device_type` varchar(255) DEFAULT NULL COMMENT 'type of the pxe device', `physical_network_id` bigint unsigned DEFAULT NULL COMMENT 'id of the physical network in to which external pxe device is added', - `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id coresponding to the 
external pxe device', + `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id corresponding to the external pxe device', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql index 50f3fa9b290..96424f206df 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql @@ -1164,7 +1164,7 @@ CREATE TABLE `cloud`.`external_cisco_vnmc_devices` ( `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which cisco vnmc device is added', `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this cisco vnmc device', `device_name` varchar(255) NOT NULL COMMENT 'name of the cisco vnmc device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external cisco vnmc device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external cisco vnmc device', PRIMARY KEY (`id`), CONSTRAINT `fk_external_cisco_vnmc_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_external_cisco_vnmc_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE diff --git a/engine/schema/src/main/resources/META-INF/db/schema-442to450.sql b/engine/schema/src/main/resources/META-INF/db/schema-442to450.sql index 5f9fff5ff95..06ee70bb2f7 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-442to450.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-442to450.sql @@ -272,7 +272,7 @@ CREATE TABLE `cloud`.`external_brocade_vcs_devices` ( `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which brocade vcs switch is added', `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name 
corresponding to this brocade vcs switch', `device_name` varchar(255) NOT NULL COMMENT 'name of the brocade vcs switch', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external brocade vcs switch', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external brocade vcs switch', PRIMARY KEY (`id`), CONSTRAINT `fk_external_brocade_vcs_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_external_brocade_vcs_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE diff --git a/engine/schema/src/main/resources/META-INF/db/schema-452to460.sql b/engine/schema/src/main/resources/META-INF/db/schema-452to460.sql index ace22e4e0b8..1046a00efd9 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-452to460.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-452to460.sql @@ -391,7 +391,7 @@ CREATE TABLE IF NOT EXISTS `cloud`.`external_bigswitch_bcf_devices` ( `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which bigswitch bcf device is added', `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this bigswitch bcf device', `device_name` varchar(255) NOT NULL COMMENT 'name of the bigswitch bcf device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external bigswitch bcf device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external bigswitch bcf device', `hostname` varchar(255) NOT NULL COMMENT 'host name or IP address for the bigswitch bcf device', `username` varchar(255) NOT NULL COMMENT 'username for the bigswitch bcf device', `password` varchar(255) NOT NULL COMMENT 'password for the bigswitch bcf device', diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java 
b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 6bd61431290..44702dca116 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -687,7 +687,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw // this time-out check was disabled - "until we have found out a VMware API that can check if there are pending tasks on the subject VM" - // but as we expire jobs and those stale worker VMs stay around untill an MS reboot we opt in to have them removed anyway + // but as we expire jobs and those stale worker VMs stay around until an MS reboot we opt in to have them removed anyway Instant start = Instant.ofEpochMilli(startTick); Instant end = start.plusSeconds(2 * (AsyncJobManagerImpl.JobExpireMinutes.value() + AsyncJobManagerImpl.JobCancelThresholdMinutes.value()) * SECONDS_PER_MINUTE); Instant now = Instant.now(); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java index dab7a74da1c..511d51bafc5 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -2575,7 +2575,7 @@ public class VmwareStorageProcessor implements StorageProcessor { HostMO hostMo = vmMo.getRunningHost(); List networks = vmMo.getNetworksWithDetails(); - // tear down all devices first before we destroy the VM to avoid accidently delete disk backing files + // tear down all devices first before we destroy the VM to avoid accidentally deleting disk backing files if (VmwareResource.getVmState(vmMo) != PowerState.PowerOff) {
vmMo.safePowerOff(_shutdownWaitMs); } diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetScalerControlCenterResource.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetScalerControlCenterResource.java index b702495f02d..c447d600561 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetScalerControlCenterResource.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetScalerControlCenterResource.java @@ -623,7 +623,7 @@ public class NetScalerControlCenterResource implements ServerResource { } } catch (Exception e) { - s_logger.error("Failed to get bytes sent and recived statistics due to " + e); + s_logger.error("Failed to get bytes sent and received statistics due to " + e); throw new ExecutionException(e.getMessage()); } diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetscalerResource.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetscalerResource.java index 35565cbf218..ac9b0c6b99d 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetscalerResource.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetscalerResource.java @@ -3560,7 +3560,7 @@ public class NetscalerResource implements ServerResource { } } } catch (final Exception e) { - s_logger.error("Failed to get bytes sent and recived statistics due to " + e); + s_logger.error("Failed to get bytes sent and received statistics due to " + e); throw new ExecutionException(e.getMessage()); } diff --git a/scripts/storage/qcow2/get_domr_kernel.sh b/scripts/storage/qcow2/get_domr_kernel.sh index d411a568dd6..d9f7f9da7d9 100755 --- a/scripts/storage/qcow2/get_domr_kernel.sh +++ b/scripts/storage/qcow2/get_domr_kernel.sh @@ -63,7 +63,7 @@ mount_local() { qemu-nbd -d /dev/nbd0p1 &> /dev/null sleep 0.5 qemu-nbd -d /dev/nbd0 
&> /dev/null - printf "Faild to mount qcow2 image\n" + printf "Failed to mount qcow2 image\n" return 3 fi return $? diff --git a/services/console-proxy/rdpconsole/pom.xml b/services/console-proxy/rdpconsole/pom.xml index 89ce5d8bc52..9352bd4cc5a 100644 --- a/services/console-proxy/rdpconsole/pom.xml +++ b/services/console-proxy/rdpconsole/pom.xml @@ -36,7 +36,7 @@ ${project.version} diff --git a/services/console-proxy/rdpconsole/src/main/java/common/adapter/AwtBellAdapter.java b/services/console-proxy/rdpconsole/src/main/java/common/adapter/AwtBellAdapter.java index c25b07f19c6..e71506509f4 100644 --- a/services/console-proxy/rdpconsole/src/main/java/common/adapter/AwtBellAdapter.java +++ b/services/console-proxy/rdpconsole/src/main/java/common/adapter/AwtBellAdapter.java @@ -61,7 +61,7 @@ public class AwtBellAdapter extends BaseElement { Element source = new FakeSource("source") { { - incommingBufLength = 0; + incomingBufLength = 0; delay = 1000; numBuffers = 3; } diff --git a/services/console-proxy/rdpconsole/src/main/java/common/asn1/Sequence.java b/services/console-proxy/rdpconsole/src/main/java/common/asn1/Sequence.java index 6fa23f8baab..5d31c707b0f 100644 --- a/services/console-proxy/rdpconsole/src/main/java/common/asn1/Sequence.java +++ b/services/console-proxy/rdpconsole/src/main/java/common/asn1/Sequence.java @@ -80,7 +80,7 @@ public class Sequence extends Tag { // If tag is required, then throw exception if (!tags[i].optional) { - throw new RuntimeException("[" + this + "] ERROR: Required tag is missed: " + tags[i] + ". Unexected tag type: " + typeAndFlags + ". Data: " + buf + throw new RuntimeException("[" + this + "] ERROR: Required tag is missed: " + tags[i] + ". Unexpected tag type: " + typeAndFlags + ". 
Data: " + buf + "."); } else { // One or more tags are omitted, so skip them @@ -91,7 +91,7 @@ public class Sequence extends Tag { } if (i >= tags.length || !tags[i].isTypeValid(typeAndFlags)) { - throw new RuntimeException("[" + this + "] ERROR: No more tags to read or skip, but some data still left in buffer. Unexected tag type: " + throw new RuntimeException("[" + this + "] ERROR: No more tags to read or skip, but some data still left in buffer. Unexpected tag type: " + typeAndFlags + ". Data: " + buf + "."); } } diff --git a/services/console-proxy/rdpconsole/src/main/java/rdpclient/ntlmssp/ClientNtlmsspPubKeyAuth.java b/services/console-proxy/rdpconsole/src/main/java/rdpclient/ntlmssp/ClientNtlmsspPubKeyAuth.java index 7aae145237b..94bd5f98aca 100644 --- a/services/console-proxy/rdpconsole/src/main/java/rdpclient/ntlmssp/ClientNtlmsspPubKeyAuth.java +++ b/services/console-proxy/rdpconsole/src/main/java/rdpclient/ntlmssp/ClientNtlmsspPubKeyAuth.java @@ -529,7 +529,7 @@ public class ClientNtlmsspPubKeyAuth extends OneTimeSwitch implements NtlmConsta (byte)0x03, (byte)0x82, (byte)0x01, (byte)0x0f, // Bit string, length: 271 bytes - (byte)0x00, // Pading + (byte)0x00, // Padding (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x0a, // Sequence (byte)0x02, (byte)0x82, (byte)0x01, (byte)0x01, // Integer, length: 257 bytes diff --git a/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerDemandActivePDU.java b/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerDemandActivePDU.java index 88ede17a19a..d11a26b7296 100644 --- a/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerDemandActivePDU.java +++ b/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerDemandActivePDU.java @@ -64,7 +64,7 @@ public class ServerDemandActivePDU extends BaseElement { // TS_SHARECONTROLHEADER::pduSource = 0x03ea (1002) int pduSource = buf.readSignedShortLE(); if (pduSource != 1002) - throw new RuntimeException("Unexepcted source of 
demand active PDU. Expected source: 1002, actual source: " + pduSource + "."); + throw new RuntimeException("Unexpected source of demand active PDU. Expected source: 1002, actual source: " + pduSource + "."); // (4 bytes): A 32-bit, unsigned integer. The share identifier for the // packet (see [T128] section 8.4.2 for more information regarding share diff --git a/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerMCSAttachUserConfirmPDU.java b/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerMCSAttachUserConfirmPDU.java index cbefcb93b69..3aa7d82c130 100644 --- a/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerMCSAttachUserConfirmPDU.java +++ b/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerMCSAttachUserConfirmPDU.java @@ -93,7 +93,7 @@ public class ServerMCSAttachUserConfirmPDU extends OneTimeSwitch { byte[] packet = new byte[] {(byte)0x2E, // MCS user confirm (001011.., // 0xb), InitiatorPresent: 1 // (......01, 0x1) - (byte)0x00, // RT successfull (0000...., 0x0) + (byte)0x00, // RT successful (0000...., 0x0) // Initiator: 1001+3 = 1004 (byte)0x00, (byte)0x03,}; diff --git a/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerX224DataPdu.java b/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerX224DataPdu.java index 2c0087e0fe1..0f0ecbcc347 100644 --- a/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerX224DataPdu.java +++ b/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ServerX224DataPdu.java @@ -46,13 +46,13 @@ public class ServerX224DataPdu extends BaseElement { int type = buf.readUnsignedByte(); // High nibble: type, low nibble: if ((type & 0xf0) != X224_TPDU_DATA) - throw new RuntimeException("[" + this + "] ERROR: Unexepcted X224 packet type. Expected packet type: " + X224_TPDU_DATA + throw new RuntimeException("[" + this + "] ERROR: Unexpected X224 packet type. 
Expected packet type: " + X224_TPDU_DATA + " (X224_TPDU_DATA), actual packet type: " + type + ", buf: " + buf + "."); int options = buf.readUnsignedByte(); if ((options & X224_TPDU_LAST_DATA_UNIT) != X224_TPDU_LAST_DATA_UNIT) - throw new RuntimeException("Unexepcted X224 packet options. Expected options: " + X224_TPDU_LAST_DATA_UNIT + throw new RuntimeException("Unexpected X224 packet options. Expected options: " + X224_TPDU_LAST_DATA_UNIT + " (X224_TPDU_LAST_DATA_UNIT), actual packet options: " + options + ", buf: " + buf + "."); ByteBuffer payload = buf.readBytes(buf.length - buf.cursor); diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/BaseElement.java b/services/console-proxy/rdpconsole/src/main/java/streamer/BaseElement.java index a93b0371b0b..e616165752a 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/BaseElement.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/BaseElement.java @@ -52,7 +52,7 @@ public class BaseElement implements Element { /** * Recommended size for incoming buffer in pull mode. */ - protected int incommingBufLength = -1; + protected int incomingBufLength = -1; protected Map inputPads = new HashMap(); protected Map outputPads = new HashMap(); @@ -393,7 +393,7 @@ public class BaseElement implements Element { { verbose = true; numBuffers = 10; - incommingBufLength = 3; + incomingBufLength = 3; delay = 100; } }; diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/DataSource.java b/services/console-proxy/rdpconsole/src/main/java/streamer/DataSource.java index 8ed41e46480..b143e322a60 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/DataSource.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/DataSource.java @@ -37,7 +37,7 @@ public interface DataSource { void pushBack(ByteBuffer buf); /** - * Hold data temporary to use at next pull. Don't return abything untill given + * Hold data temporary to use at next pull. 
Don't return anything until given * amount of data will be read from source, because data will be pushed back * anyway. * diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/InputStreamSource.java b/services/console-proxy/rdpconsole/src/main/java/streamer/InputStreamSource.java index 958e5e0f016..f596cf22cef 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/InputStreamSource.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/InputStreamSource.java @@ -101,7 +101,7 @@ public class InputStreamSource extends BaseElement { } // Create buffer of recommended size and with default offset - ByteBuffer buf = new ByteBuffer(incommingBufLength); + ByteBuffer buf = new ByteBuffer(incomingBufLength); if (verbose) System.out.println("[" + this + "] INFO: Reading data from stream."); diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/OutputStreamSink.java b/services/console-proxy/rdpconsole/src/main/java/streamer/OutputStreamSink.java index 27ef614440f..c2d58c0f1ff 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/OutputStreamSink.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/OutputStreamSink.java @@ -137,7 +137,7 @@ public class OutputStreamSink extends BaseElement { { verbose = true; numBuffers = 3; - incommingBufLength = 5; + incomingBufLength = 5; delay = 100; } }; diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/PipelineImpl.java b/services/console-proxy/rdpconsole/src/main/java/streamer/PipelineImpl.java index 299d0a466f7..342f2c3c52e 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/PipelineImpl.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/PipelineImpl.java @@ -312,7 +312,7 @@ public class PipelineImpl implements Pipeline { // Create elements pipeline.add(new FakeSource("source") { { - incommingBufLength = 3; + incomingBufLength = 3; numBuffers = 10; delay = 100; } diff --git 
a/services/console-proxy/rdpconsole/src/main/java/streamer/Queue.java b/services/console-proxy/rdpconsole/src/main/java/streamer/Queue.java index 910e073a058..ea64b323d2c 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/Queue.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/Queue.java @@ -108,7 +108,7 @@ public class Queue extends BaseElement { { delay = 100; numBuffers = 10; - incommingBufLength = 10; + incomingBufLength = 10; } }; @@ -116,7 +116,7 @@ public class Queue extends BaseElement { { delay = 100; numBuffers = 10; - incommingBufLength = 10; + incomingBufLength = 10; } }; diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/SyncLink.java b/services/console-proxy/rdpconsole/src/main/java/streamer/SyncLink.java index 444ef30b7da..d0e7d33934b 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/SyncLink.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/SyncLink.java @@ -357,7 +357,7 @@ public class SyncLink implements Link { * must be only one pull loop per thread. * * Pull loop will start after event STREAM_START. This link and source element - * incomming links will be switched to pull mode before pull loop will be + * incoming links will be switched to pull mode before pull loop will be * started using event LINK_SWITCH_TO_PULL_MODE. 
*/ @Override @@ -412,7 +412,7 @@ public class SyncLink implements Link { throw new RuntimeException("[" + this + "] ERROR: Cannot drop link in pull mode."); if (cacheBuffer != null) - throw new RuntimeException("[" + this + "] ERROR: Cannot drop link when cache conatains data: " + cacheBuffer + "."); + throw new RuntimeException("[" + this + "] ERROR: Cannot drop link when cache contains data: " + cacheBuffer + "."); source.dropLink(this); sink.dropLink(this); diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSink.java b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSink.java index 204ebb620e2..326570b7141 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSink.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSink.java @@ -78,7 +78,7 @@ public class AprSocketSink extends BaseElement { socketWrapper.upgradeToSsl(); break; case LINK_SWITCH_TO_PULL_MODE: - throw new RuntimeException("[" + this + "] ERROR: Unexpected event: sink recived LINK_SWITCH_TO_PULL_MODE event."); + throw new RuntimeException("[" + this + "] ERROR: Unexpected event: sink received LINK_SWITCH_TO_PULL_MODE event."); default: super.handleEvent(event, direction); } diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSource.java b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSource.java index 32345769aa6..f4cd7e2539e 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSource.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSource.java @@ -98,7 +98,7 @@ public class AprSocketSource extends BaseElement { try { // Create buffer of recommended size and with default offset - ByteBuffer buf = new ByteBuffer(incommingBufLength); + ByteBuffer buf = new ByteBuffer(incomingBufLength); if (verbose) System.out.println("[" + this + "] INFO: Reading data from stream."); diff 
--git a/services/console-proxy/rdpconsole/src/main/java/streamer/debug/FakeSource.java b/services/console-proxy/rdpconsole/src/main/java/streamer/debug/FakeSource.java index 85ae8235d48..1a0f56b9a94 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/debug/FakeSource.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/debug/FakeSource.java @@ -77,7 +77,7 @@ public class FakeSource extends BaseElement { * Initialize data. */ public ByteBuffer initializeData() { - ByteBuffer buf = new ByteBuffer(incommingBufLength); + ByteBuffer buf = new ByteBuffer(incomingBufLength); // Set first byte of package to it sequance number buf.data[buf.offset] = (byte)(packetNumber % 128); @@ -106,7 +106,7 @@ public class FakeSource extends BaseElement { Element fakeSource = new FakeSource("source 3/10/100") { { verbose = true; - incommingBufLength = 3; + incomingBufLength = 3; numBuffers = 10; delay = 100; } diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/debug/MockServer.java b/services/console-proxy/rdpconsole/src/main/java/streamer/debug/MockServer.java index 384ff5ee8b5..c8f08b4bc5d 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/debug/MockServer.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/debug/MockServer.java @@ -88,7 +88,7 @@ public class MockServer implements Runnable { // Compare actual data with expected data if (actualDataLength != packet.data.length) { throw new AssertionError("Actual length of client request for packet #" + (i + 1) + " (\"" + packet.id + "\")" - + " does not match length of expected client request. Actual length: " + actualDataLength + ", expected legnth: " + packet.data.length + + " does not match length of expected client request. 
Actual length: " + actualDataLength + ", expected length: " + packet.data.length + "."); } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxy.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxy.java index fd4f68bcee0..22dc29be60d 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxy.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxy.java @@ -457,7 +457,7 @@ public class ConsoleProxy { s_logger.info("Added viewer object " + viewer); reportLoadChange = true; } else { - // protected against malicous attack by modifying URL content + // protected against malicious attack by modifying URL content if (ajaxSession != null) { long ajaxSessionIdFromUrl = Long.parseLong(ajaxSession); if (ajaxSessionIdFromUrl != viewer.getAjaxSessionId()) @@ -516,7 +516,7 @@ public class ConsoleProxy { ConsoleProxyAuthenticationResult authResult = authenticateConsoleAccess(param, false); if (authResult == null || !authResult.isSuccess()) { - s_logger.warn("External authenticator failed authencation request for vm " + param.getClientTag() + " with sid " + param.getClientHostPassword()); + s_logger.warn("External authenticator failed authentication request for vm " + param.getClientTag() + " with sid " + param.getClientHostPassword()); throw new AuthenticationException("External authenticator failed request for vm " + param.getClientTag() + " with sid " + param.getClientHostPassword()); } @@ -566,7 +566,7 @@ public class ConsoleProxy { try { authenticationExternally(param); } catch (Exception e) { - s_logger.error("Authencation failed for param: " + param); + s_logger.error("Authentication failed for param: " + param); return null; } s_logger.info("Initializing new novnc client and disconnecting existing session"); diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java 
b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java index b7f969a1e57..28d6ec1cab7 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java @@ -40,7 +40,7 @@ public class ConsoleProxyHttpHandlerHelper { map.put(name, value); } else { if (s_logger.isDebugEnabled()) - s_logger.debug("Invalid paramemter in URL found. param: " + param); + s_logger.debug("Invalid parameter in URL found. param: " + param); } } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index 23b4e30f28c..b7b9736a742 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -149,7 +149,7 @@ public class PremiumSecondaryStorageManagerImpl extends SecondaryStorageManagerI } if (!suspendAutoLoading) { - // this is to avoid surprises that people may accidently see two SSVMs being launched, capacity expanding only happens when we have at least the primary SSVM is up + // this is to avoid surprises that people may accidentally see two SSVMs being launched, capacity expanding only happens when we have at least the primary SSVM is up if (alreadyRunning.size() == 0) { s_logger.info("Primary secondary storage is not even started, wait until next turn"); return new Pair(AfterScanAction.nop, null); diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java 
b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index 8906d781205..fa42d000f33 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -1129,7 +1129,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar s_logger.debug("Management server cidr list is " + mgmt_cidr); buf.append(" mgmtcidr=").append(mgmt_cidr); } else { - s_logger.error("Inavlid management server cidr list: " + mgmt_cidr); + s_logger.error("Invalid management server cidr list: " + mgmt_cidr); } buf.append(" localgw=").append(dest.getPod().getGateway()); buf.append(" private.network.device=").append("eth").append(deviceId); diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 5ed7fb9121d..a0faddd1810 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -1139,7 +1139,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } return new File(destFile.getAbsolutePath()); } catch (IOException e) { - s_logger.debug("Faild to get url:" + url + ", due to " + e.toString()); + s_logger.debug("Failed to get url: " + url + ", due to " + e.toString()); throw new CloudRuntimeException(e); } } diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index 2c68091807b..a241d9a4624 100755 --- a/setup/db/create-schema.sql +++ 
b/setup/db/create-schema.sql @@ -1948,8 +1948,8 @@ CREATE TABLE `cloud`.`swift` ( CREATE TABLE `cloud`.`op_host_transfer` ( `id` bigint unsigned UNIQUE NOT NULL COMMENT 'Id of the host', - `initial_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transfered from', - `future_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transfered to', + `initial_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transferred from', + `future_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transferred to', `state` varchar(32) NOT NULL COMMENT 'the transfer state of the host', `created` datetime NOT NULL COMMENT 'date created', PRIMARY KEY (`id`), @@ -2138,7 +2138,7 @@ CREATE TABLE `cloud`.`external_load_balancer_devices` ( `is_dedicated` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if device/appliance is provisioned for dedicated use only', `is_inline` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer will be used in in-line configuration with firewall', `is_managed` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer appliance is provisioned and its life cycle is managed by by cloudstack', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external load balancer device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external load balancer device', `parent_host_id` bigint unsigned COMMENT 'if the load balancer appliance is cloudstack managed, then host id on which this appliance is provisioned', PRIMARY KEY (`id`), CONSTRAINT `fk_external_lb_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, @@ -2155,7 +2155,7 @@ CREATE TABLE `cloud`.`external_firewall_devices` ( `device_state` varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'state (enabled/disabled/shutdown) of the device', `is_dedicated` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if device/appliance meant for dedicated use only', 
`allocation_state` varchar(32) NOT NULL DEFAULT 'Free' COMMENT 'Allocation state (Free/Allocated) of the device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external firewall device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external firewall device', `capacity` bigint unsigned NOT NULL DEFAULT 0 COMMENT 'Capacity of the external firewall device', PRIMARY KEY (`id`), CONSTRAINT `fk_external_firewall_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, @@ -2459,7 +2459,7 @@ CREATE TABLE `cloud`.`external_nicira_nvp_devices` ( `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which nicira nvp device is added', `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this nicira nvp device', `device_name` varchar(255) NOT NULL COMMENT 'name of the nicira nvp device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external nicira nvp device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external nicira nvp device', PRIMARY KEY (`id`), CONSTRAINT `fk_external_nicira_nvp_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_external_nicira_nvp_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE diff --git a/systemvm/agent/noVNC/core/rfb.js b/systemvm/agent/noVNC/core/rfb.js index eda1597e6c0..c38e8e5e6f1 100644 --- a/systemvm/agent/noVNC/core/rfb.js +++ b/systemvm/agent/noVNC/core/rfb.js @@ -2674,7 +2674,7 @@ RFB.messages = { }, extendedClipboardProvide(sock, formats, inData) { - // Deflate incomming data and their sizes + // Deflate incoming data and their sizes let deflator = new Deflator(); let dataToDeflate = []; diff --git a/systemvm/debian/opt/cloud/bin/setup/common.sh b/systemvm/debian/opt/cloud/bin/setup/common.sh index 
ceb3150207b..c7b01c25bd4 100755 --- a/systemvm/debian/opt/cloud/bin/setup/common.sh +++ b/systemvm/debian/opt/cloud/bin/setup/common.sh @@ -619,7 +619,7 @@ parse_cmd_line() { KEY=$(echo $i | cut -d= -f1) VALUE=$(echo $i | cut -d= -f2) echo -en ${COMMA} >> ${CHEF_TMP_FILE} - # Two lines so values do not accidently interpretted as escapes!! + # Two lines so values do not accidentally interpreted as escapes!! echo -n \"${KEY}\"': '\"${VALUE}\" >> ${CHEF_TMP_FILE} COMMA=",\n\t" case $KEY in diff --git a/test/integration/component/maint/test_redundant_router.py b/test/integration/component/maint/test_redundant_router.py index f79856d3dc2..2518704e635 100644 --- a/test/integration/component/maint/test_redundant_router.py +++ b/test/integration/component/maint/test_redundant_router.py @@ -1579,7 +1579,7 @@ class TestRvRRedundancy(cloudstackTestCase): if retry == 0: self.fail("New router creation taking too long, timed out") - def wait_untill_router_stabilises(self): + def wait_until_router_stabilises(self): retry=4 while retry > 0: routers = Router.list( @@ -1588,7 +1588,7 @@ class TestRvRRedundancy(cloudstackTestCase): listall=True ) retry = retry-1 - self.info("waiting untill state of the routers is stable") + self.info("waiting until state of the routers is stable") if routers[0].redundantstate != 'UNKNOWN' and routers[1].redundantstate != 'UNKNOWN': return elif retry==0: @@ -1610,7 +1610,7 @@ class TestRvRRedundancy(cloudstackTestCase): #clean up the network to make sure it is in proper state. self.network.restart(self.apiclient,cleanup=True) time.sleep(self.testdata["sleep"]) - self.wait_untill_router_stabilises() + self.wait_until_router_stabilises() old_primary_router, old_backup_router = self.get_primary_and_backupRouter() self.info("old_primary_router:"+old_primary_router.name+" old_backup_router"+old_backup_router.name) #chek if the network is in correct state