Mirror of https://github.com/apache/cloudstack.git, synced 2025-10-26 01:32:18 +02:00
misc: fix spelling (#7549)
Co-authored-by: Stephan Krug <stekrug@icloud.com>
This commit is contained in:
parent 9c4d18f9b1
commit f090c77f41
@@ -64,7 +64,7 @@ public class DeleteHostCmd extends BaseCmd {
 return (forced != null) ? forced : false;
 }

-public boolean isForceDestoryLocalStorage() {
+public boolean isForceDestroyLocalStorage() {
 return (forceDestroyLocalStorage != null) ? forceDestroyLocalStorage : true;
 }

@@ -79,7 +79,7 @@ public class DeleteHostCmd extends BaseCmd {

 @Override
 public void execute() {
-boolean result = _resourceService.deleteHost(getId(), isForced(), isForceDestoryLocalStorage());
+boolean result = _resourceService.deleteHost(getId(), isForced(), isForceDestroyLocalStorage());
 if (result) {
 SuccessResponse response = new SuccessResponse(getCommandName());
 this.setResponseObject(response);
@@ -133,7 +133,7 @@ public class AssignToLoadBalancerRuleCmd extends BaseAsyncCmd {
 throw new InvalidParameterValueException("Unable to find virtual machine ID: " + vmId);
 }

-//check wether the given ip is valid ip or not
+//check whether the given ip is valid ip or not
 if (vmIp == null || !NetUtils.isValidIp4(vmIp)) {
 throw new InvalidParameterValueException("Invalid ip address "+ vmIp +" passed in vmidipmap for " +
 "vmid " + vmId);
@@ -228,7 +228,7 @@ public class NetworkOfferingDaoImpl extends GenericDaoBase<NetworkOfferingVO, Lo
 }

 /**
-* Persist L2 deafult Network offering
+* Persist L2 default Network offering
 */
 private void persistL2DefaultNetworkOffering(String name, String displayText, boolean specifyVlan, boolean configDriveEnabled) {
 NetworkOfferingVO offering = new NetworkOfferingVO(name, displayText, TrafficType.Guest, false, specifyVlan,
@@ -86,7 +86,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
 boolean noDuplicate = true;
 StringBuffer helpInfo = new StringBuffer();
 String note =
-"DATABASE INTEGRITY ERROR\nManagement server detected there are some hosts connect to the same loacal storage, please contact CloudStack support team for solution. Below are detialed info, please attach all of them to CloudStack support. Thank you\n";
+"DATABASE INTEGRITY ERROR\nManagement server detected there are some hosts connect to the same local storage, please contact CloudStack support team for solution. Below are detailed info, please attach all of them to CloudStack support. Thank you\n";
 helpInfo.append(note);
 while (rs.next()) {
 try ( PreparedStatement sel_pstmt =
@@ -1168,7 +1168,7 @@ public class Upgrade218to22 implements DbUpgrade {
 pstmt.executeUpdate();
 s_logger.debug("Upgraded userStatistcis with device_type=DomainRouter");

-// update device_id infrormation
+// update device_id information
 try (
 PreparedStatement selectUserStatistics = conn.prepareStatement("SELECT id, account_id, data_center_id FROM user_statistics");
 ResultSet rs = selectUserStatistics.executeQuery();
@@ -332,7 +332,7 @@ public class Upgrade224to225 implements DbUpgrade {
 pstmt.close();
 } catch (SQLException e) {
 s_logger.error("Unable to add missing foreign key; following statement was executed:" + pstmt);
-throw new CloudRuntimeException("Unable to add missign keys due to exception", e);
+throw new CloudRuntimeException("Unable to add missing keys due to exception", e);
 }
 }

@@ -348,7 +348,7 @@ public class Upgrade224to225 implements DbUpgrade {
 }
 } catch (SQLException e) {
 s_logger.error("Unable to add missing ovs tunnel account due to ", e);
-throw new CloudRuntimeException("Unable to add missign ovs tunnel account due to ", e);
+throw new CloudRuntimeException("Unable to add missing ovs tunnel account due to ", e);
 }
 }
 }
@@ -66,7 +66,7 @@ public class Upgrade301to302 extends LegacyDbUpgrade {
 keys.add("i_host__allocation_state");
 uniqueKeys.put("host", keys);

-s_logger.debug("Droping i_host__allocation_state key in host table");
+s_logger.debug("Dropping i_host__allocation_state key in host table");
 for (String tableName : uniqueKeys.keySet()) {
 DbUpgradeUtils.dropKeysIfExist(conn, tableName, uniqueKeys.get(tableName), false);
 }
@@ -646,7 +646,7 @@ INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (88, 6, 'W
 INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (89, 6, 'Windows Server 2003 Standard Edition(32-bit)');
 INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (90, 6, 'Windows Server 2003 Standard Edition(64-bit)');
 INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (91, 6, 'Windows Server 2003 Web Edition');
-INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (92, 6, 'Microsoft Small Bussiness Server 2003');
+INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (92, 6, 'Microsoft Small Business Server 2003');
 INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (93, 6, 'Windows XP (32-bit)');
 INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (94, 6, 'Windows XP (64-bit)');
 INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (95, 6, 'Windows 2000 Advanced Server');
@@ -779,7 +779,7 @@ INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Standard Edition (32-bit)', 89);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Standard Edition (64-bit)', 90);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Web Edition', 91);
-INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Small Bussiness Server 2003', 92);
+INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Small Business Server 2003', 92);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Vista (32-bit)', 56);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Vista (64-bit)', 101);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows XP Professional (32-bit)', 93);
@@ -77,6 +77,6 @@ update host_details set name='memory' where host_id in (select id from host wher
 update host_details set name='privateip' where host_id in (select id from host where hypervisor_type='BareMetal') and name='agentIp';

 INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.root.disk.controller', 'ide', 'Specify the default disk controller for root volumes, valid values are scsi, ide');
-INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.destory.forcestop', 'false', 'On destory, force-stop takes this value');
+INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.destroy.forcestop', 'false', 'On destroy, force-stop takes this value');
 INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.lock.timeout', '600', 'Lock wait timeout (seconds) while implementing network');
 INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.disable.rpfilter','true','disable rp_filter on Domain Router VM public interfaces.');
@@ -702,7 +702,7 @@ UPDATE `cloud`.`configuration` SET category = 'Usage' where name in ('usage.exec
 ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD CONSTRAINT `fk_op_dc_vnet_alloc__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE;
 ALTER TABLE `cloud`.`domain` ADD COLUMN `type` varchar(255) NOT NULL DEFAULT 'Normal' COMMENT 'type of the domain - can be Normal or Project';

-UPDATE `cloud`.`configuration` SET name='vm.destroy.forcestop' where name='vm.destory.forcestop';
+UPDATE `cloud`.`configuration` SET name='vm.destroy.forcestop' where name='vm.destroy.forcestop';
 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.destroy.forcestop', 'false', 'On destroy, force-stop takes this value');
 DELETE FROM `cloud`.`configuration` where name='skip.steps';
@@ -31,7 +31,7 @@ CREATE TABLE IF NOT EXISTS `cloud`.`keystore` (
 `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
 `name` varchar(64) NOT NULL COMMENT 'unique name for the certifiation',
 `certificate` text NOT NULL COMMENT 'the actual certificate being stored in the db',
-`key` text NOT NULL COMMENT 'private key associated wih the certificate',
+`key` text NOT NULL COMMENT 'private key associated with the certificate',
 `domain_suffix` varchar(256) NOT NULL COMMENT 'DNS domain suffix associated with the certificate',
 PRIMARY KEY (`id`),
 UNIQUE(name)
@@ -32,7 +32,7 @@ CREATE TABLE IF NOT EXISTS `cloud`.`keystore` (
 `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
 `name` varchar(64) NOT NULL COMMENT 'unique name for the certifiation',
 `certificate` text NOT NULL COMMENT 'the actual certificate being stored in the db',
-`key` text NOT NULL COMMENT 'private key associated wih the certificate',
+`key` text NOT NULL COMMENT 'private key associated with the certificate',
 `domain_suffix` varchar(256) NOT NULL COMMENT 'DNS domain suffix associated with the certificate',
 PRIMARY KEY (`id`),
 UNIQUE(name)
@@ -68,7 +68,7 @@ ALTER TABLE `cloud`.`port_forwarding_rules` ADD CONSTRAINT `fk_port_forwarding_r
 INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'agent.load.threshold', '0.70', 'Percentage (as a value between 0 and 1) of connected agents after which agent load balancing will start happening');
 INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.visibility', 'global', 'Load Balancer(haproxy) stats visibility, it can take the following four parameters : global,guest-network,link-local,disabled');
 INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.uri','/admin?stats','Load Balancer(haproxy) uri.');
-INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authetication string in the format username:password');
+INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authentication string in the format username:password');
 INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.port','8081','Load Balancer(haproxy) stats port number.');
 INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'use.external.dns', 'false', 'Bypass the cloudstack DHCP/DNS server vm name service, use zone external dns1 and dns2');
 INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'network.loadbalancer.basiczone.elb.enabled', 'false', 'Whether the load balancing service is enabled for basic zones');
@@ -50,7 +50,7 @@ INSERT IGNORE INTO `cloud`.`configuration` (category, instance, name, value, des
 INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'agent.load.threshold', '0.70', 'Percentage (as a value between 0 and 1) of connected agents after which agent load balancing will start happening');
 INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.visibility', 'global', 'Load Balancer(haproxy) stats visibility, it can take the following four parameters : global,guest-network,link-local,disabled');
 INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.uri','/admin?stats','Load Balancer(haproxy) uri.');
-INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authetication string in the format username:password');
+INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authentication string in the format username:password');
 INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.port','8081','Load Balancer(haproxy) stats port number.');
 INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'use.external.dns', 'false', 'Bypass the cloudstack DHCP/DNS server vm name service, use zone external dns1 and dns2');
 INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'network.loadbalancer.basiczone.elb.enabled', 'false', 'Whether the load balancing service is enabled for basic zones');
@@ -227,7 +227,7 @@ CREATE TABLE `cloud`.`nicira_nvp_nic_map` (

 -- rrq 5839
 -- Remove the unique constraint on physical_network_id, provider_name from physical_network_service_providers
--- Because the name of this contraint is not set we need this roundabout way
+-- Because the name of this constraint is not set we need this roundabout way
 -- The key is also used by the foreign key constraint so drop and recreate that one
 ALTER TABLE `cloud`.`physical_network_service_providers` DROP FOREIGN KEY fk_pnetwork_service_providers__physical_network_id;
@@ -51,7 +51,7 @@ ALTER TABLE `storage_pool` ADD `user_info` VARCHAR( 255 ) NULL COMMENT 'Authoriz
 INSERT INTO `cloud`.`configuration` (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT', 'management-server', 'event.purge.interval', '86400', 'The interval (in seconds) to wait before running the event purge thread');
 -- rrq 5839
 -- Remove the unique constraint on physical_network_id, provider_name from physical_network_service_providers
--- Because the name of this contraint is not set we need this roundabout way
+-- Because the name of this constraint is not set we need this roundabout way
 -- The key is also used by the foreign key constraint so drop and recreate that one
 ALTER TABLE physical_network_service_providers DROP FOREIGN KEY fk_pnetwork_service_providers__physical_network_id;
 SET @constraintname = (select CONCAT(CONCAT('DROP INDEX ', A.CONSTRAINT_NAME), ' ON physical_network_service_providers' )
@@ -154,7 +154,7 @@ CREATE VIEW `cloud`.`image_store_view` AS
 `cloud`.`image_store_details` ON image_store_details.store_id = image_store.id;


--- here we have to allow null for store_id to accomodate baremetal case to search for ready templates since template state is only stored in this table
+-- here we have to allow null for store_id to accommodate baremetal case to search for ready templates since template state is only stored in this table
 -- FK also commented out due to this
 CREATE TABLE `cloud`.`template_store_ref` (
 `id` bigint unsigned NOT NULL auto_increment,
@@ -525,9 +525,9 @@ CREATE VIEW `cloud`.`event_view` AS
 left join
 `cloud`.`event` eve ON event.start_id = eve.id;

-ALTER TABLE `cloud`.`region` ADD COLUMN `portableip_service_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'Is Portable IP service enalbed in the Region';
+ALTER TABLE `cloud`.`region` ADD COLUMN `portableip_service_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'Is Portable IP service enabled in the Region';

-ALTER TABLE `cloud`.`region` ADD COLUMN `gslb_service_enabled` tinyint(1) unsigned NOT NULL DEFAULT 1 COMMENT 'Is GSLB service enalbed in the Region';
+ALTER TABLE `cloud`.`region` ADD COLUMN `gslb_service_enabled` tinyint(1) unsigned NOT NULL DEFAULT 1 COMMENT 'Is GSLB service enabled in the Region';

 ALTER TABLE `cloud`.`external_load_balancer_devices` ADD COLUMN `is_gslb_provider` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer appliance is acting as gslb service provider in the zone';
@@ -2067,7 +2067,7 @@ update `cloud`.`vpc_gateways` set network_acl_id = 2;

 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'VpcManager', 'blacklisted.routes', NULL, 'Routes that are blacklisted, can not be used for Static Routes creation for the VPC Private Gateway');

-INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'enable.dynamic.scale.vm', 'false', 'Enables/Diables dynamically scaling a vm');
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'enable.dynamic.scale.vm', 'false', 'Enables/Disables dynamically scaling a vm');

 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'scale.retry', '2', 'Number of times to retry scaling up the vm');

@@ -20,7 +20,7 @@
 --;

 -- Update the description to indicate this only works with KVM + Ceph
--- (not implemented properly atm for KVM+NFS/local, and it accidentaly works with XS + NFS. Not applicable for VMware)
+-- (not implemented properly atm for KVM+NFS/local, and it accidentally works with XS + NFS. Not applicable for VMware)
 UPDATE `cloud`.`configuration` SET `description`='Indicates whether to always backup primary storage snapshot to secondary storage. Keeping snapshots only on Primary storage is applicable for KVM + Ceph only.' WHERE `name`='snapshot.backup.to.secondary';

 -- KVM: enable storage data motion on KVM hypervisor_capabilities
@@ -402,7 +402,7 @@ GROUP BY
 ALTER TABLE `cloud`.`load_balancing_rules`
 ADD cidr_list VARCHAR(4096);

--- savely add resources in parallel
+-- safely add resources in parallel
 -- PR#5984 Create table to persist VM stats.
 DROP TABLE IF EXISTS `cloud`.`resource_reservation`;
 CREATE TABLE `cloud`.`resource_reservation` (
@@ -238,7 +238,7 @@ update `cloud`.`volumes` v, `cloud`.`volume_host_ref` vhr set v.format=vhr.fo


 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'baremetal.ipmi.lan.interface', 'default', 'option specified in -I option of impitool. candidates are: open/bmc/lipmi/lan/lanplus/free/imb, see ipmitool man page for details. default value "default" means using default option of ipmitool');
-INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'baremetal.ipmi.fail.retry', 'default', "ipmi interface will be temporary out of order after power opertions(e.g. cycle, on), it leads following commands fail immediately. The value specifies retry times before accounting it as real failure");
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'baremetal.ipmi.fail.retry', 'default', "ipmi interface will be temporary out of order after power operations(e.g. cycle, on), it leads following commands fail immediately. The value specifies retry times before accounting it as real failure");
 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.hung.wokervm.timeout', '7200', 'Worker VM timeout in seconds');
 INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Alert", 'DEFAULT', 'management-server', "alert.smtp.connectiontimeout", "30000", "Socket connection timeout value in milliseconds. -1 for infinite timeout.");
 INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Alert", 'DEFAULT', 'management-server', "alert.smtp.timeout", "30000", "Socket I/O timeout value in milliseconds. -1 for infinite timeout.");
@@ -183,7 +183,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
 fullBackup = false;
 }
 } else if (oldestSnapshotOnPrimary.getId() != parentSnapshotOnPrimaryStore.getId()){
-// if there is an snapshot entry for previousPool(primary storage) of migrated volume, delete it becasue CS created one more snapshot entry for current pool
+// if there is an snapshot entry for previousPool(primary storage) of migrated volume, delete it because CS created one more snapshot entry for current pool
 snapshotStoreDao.remove(oldestSnapshotOnPrimary.getId());
 }
 }
@@ -102,7 +102,7 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
 stateMachines.addTransition(State.Destroying, Event.OperationFailed, State.Destroying);
 stateMachines.addTransition(State.Failed, Event.DestroyRequested, State.Destroying);
 // TODO: further investigate why an extra event is sent when it is
-// alreay Ready for DownloadListener
+// already Ready for DownloadListener
 stateMachines.addTransition(State.Ready, Event.OperationSuccessed, State.Ready);
 // State transitions for data object migration
 stateMachines.addTransition(State.Ready, Event.MigrateDataRequested, State.Migrating);
@@ -428,7 +428,7 @@ public class DefaultEndPointSelector implements EndPointSelector {
 }

 // If ssvm doesn't exist then find any ssvm in the zone.
-s_logger.debug("Coudn't find ssvm for url" +downloadUrl);
+s_logger.debug("Couldn't find ssvm for url" +downloadUrl);
 return findEndpointForImageStorage(store);
 }
@@ -314,7 +314,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO

 @Override
 public List<TemplateDataStoreVO> listByTemplateZoneDownloadStatus(long templateId, Long zoneId, Status... status) {
-// get all elgible image stores
+// get all eligible image stores
 List<DataStore> imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId));
 if (imgStores != null) {
 List<TemplateDataStoreVO> result = new ArrayList<TemplateDataStoreVO>();
@@ -341,7 +341,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO

 @Override
 public TemplateDataStoreVO findByTemplateZoneDownloadStatus(long templateId, Long zoneId, Status... status) {
-// get all elgible image stores
+// get all eligible image stores
 List<DataStore> imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId));
 if (imgStores != null) {
 for (DataStore store : imgStores) {
@@ -357,7 +357,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO

 @Override
 public TemplateDataStoreVO findByTemplateZoneStagingDownloadStatus(long templateId, Long zoneId, Status... status) {
-// get all elgible image stores
+// get all eligible image stores
 List<DataStore> cacheStores = _storeMgr.getImageCacheStores(new ZoneScope(zoneId));
 if (cacheStores != null) {
 for (DataStore store : cacheStores) {
@@ -448,7 +448,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO

 @Override
 public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, DataStoreRole role) {
-// get all elgible image stores
+// get all eligible image stores
 List<DataStore> imgStores = null;
 if (role == DataStoreRole.Image) {
 imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId));
@@ -32,8 +32,8 @@ public class BaseTypeTest {

 @Test
 public void testIsSameTypeAs() {
-Assert.assertTrue("'a' and 'A' should be considdered the same type", new TestType("a").isSameTypeAs("A"));
-Assert.assertTrue("'B' and 'b' should be considdered the same address", new TestType("B").isSameTypeAs(new TestType("b")));
+Assert.assertTrue("'a' and 'A' should be considered the same type", new TestType("a").isSameTypeAs("A"));
+Assert.assertTrue("'B' and 'b' should be considered the same address", new TestType("B").isSameTypeAs(new TestType("b")));
 }
 class TestType extends BaseType {
 String content;
@@ -24,7 +24,7 @@ using System.Text;
 using System.Threading.Tasks;

 // C# versions of certain CloudStack types to simplify JSON serialisation.
-// Limit to the number of types, becasue they are written and maintained manually.
+// Limit to the number of types, because they are written and maintained manually.
 // JsonProperty used to identify property name when serialised, which allows
 // later adoption of C# naming conventions if requried.
 namespace HypervResource
@@ -217,7 +217,7 @@ namespace ServerResource.Tests
 {
 testSampleVolumeTempURIJSON = "\"storagepool\"";
 // Arrange
-String destoryCmd = //"{\"volume\":" + getSampleVolumeObjectTO() + "}";
+String destroyCmd = //"{\"volume\":" + getSampleVolumeObjectTO() + "}";
 "{\"volume\":{\"name\":\"" + testSampleVolumeTempUUIDNoExt
 + "\",\"storagePoolType\":\"Filesystem\","
 + "\"mountPoint\":"
@@ -233,15 +233,15 @@ namespace ServerResource.Tests
 HypervResourceController rsrcServer = new HypervResourceController();
 HypervResourceController.wmiCallsV2 = wmiCallsV2;

-dynamic jsonDestoryCmd = JsonConvert.DeserializeObject(destoryCmd);
+dynamic jsonDestroyCmd = JsonConvert.DeserializeObject(destroyCmd);

 // Act
-dynamic destoryAns = rsrcServer.DestroyCommand(jsonDestoryCmd);
+dynamic destroyAns = rsrcServer.DestroyCommand(jsonDestroyCmd);

 // Assert
-JObject ansAsProperty2 = destoryAns[0];
+JObject ansAsProperty2 = destroyAns[0];
 dynamic ans = ansAsProperty2.GetValue(CloudStackTypes.Answer);
-String path = jsonDestoryCmd.volume.path;
+String path = jsonDestroyCmd.volume.path;
 Assert.True((bool)ans.result, "DestroyCommand did not succeed " + ans.details);
 Assert.True(!File.Exists(path), "Failed to delete file " + path);
 }
@@ -232,7 +232,7 @@ namespace ServerResource.Tests
 {
 // Arrange
 String sampleVolume = getSampleVolumeObjectTO();
-String destoryCmd = //"{\"volume\":" + getSampleVolumeObjectTO() + "}";
+String destroyCmd = //"{\"volume\":" + getSampleVolumeObjectTO() + "}";
 "{\"volume\":{\"name\":\"" + testSampleVolumeTempUUIDNoExt
 + "\",\"storagePoolType\":\"Filesystem\","
 + "\"mountPoint\":"
@@ -243,15 +243,15 @@ namespace ServerResource.Tests
 + "\"type\":\"ROOT\",\"id\":9,\"size\":0}}";

 HypervResourceController rsrcServer = new HypervResourceController();
-dynamic jsonDestoryCmd = JsonConvert.DeserializeObject(destoryCmd);
+dynamic jsonDestroyCmd = JsonConvert.DeserializeObject(destroyCmd);

 // Act
-dynamic destoryAns = rsrcServer.DestroyCommand(jsonDestoryCmd);
+dynamic destroyAns = rsrcServer.DestroyCommand(jsonDestroyCmd);

 // Assert
-JObject ansAsProperty2 = destoryAns[0];
+JObject ansAsProperty2 = destroyAns[0];
 dynamic ans = ansAsProperty2.GetValue(CloudStackTypes.Answer);
-String path = jsonDestoryCmd.volume.path;
+String path = jsonDestroyCmd.volume.path;
 Assert.True((bool)ans.result, "DestroyCommand did not succeed " + ans.details);
 Assert.True(!File.Exists(path), "Failed to delete file " + path);
 }
@@ -107,7 +107,7 @@ public class KVMGuestOsMapper {
 s_mapper.put("Windows Server 2003 Standard Edition(32-bit)", "Windows Server 2003");
 s_mapper.put("Windows Server 2003 Standard Edition(64-bit)", "Windows Server 2003");
 s_mapper.put("Windows Server 2003 Web Edition", "Windows Server 2003");
-s_mapper.put("Microsoft Small Bussiness Server 2003", "Windows Server 2003");
+s_mapper.put("Microsoft Small Business Server 2003", "Windows Server 2003");
 s_mapper.put("Windows Server 2008 (32-bit)", "Windows Server 2008");
 s_mapper.put("Windows Server 2008 (64-bit)", "Windows Server 2008");
 s_mapper.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008");
@@ -100,7 +100,7 @@ class OvmVolume(OvmObject):
 priStorageMountPoint = sr.mountpoint
 volDir = join(priStorageMountPoint, 'running_pool', volDirUuid)
 if exists(volDir):
-raise Exception("Volume dir %s alreay existed, can not override"%volDir)
+raise Exception("Volume dir %s already existed, can not override"%volDir)
 os.makedirs(volDir)
 OvmStoragePool()._checkDirSizeForImage(volDir, templateUrl)
 volName = volUuid + '.raw'
@@ -1581,7 +1581,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
 try {
 final Set<VDI> vdis = VDI.getByNameLabel(conn, nameLabel);
 if (vdis.size() != 1) {
-s_logger.warn("destoryVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel);
+s_logger.warn("destroyVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel);
 return;
 }
 for (final VDI vdi : vdis) {
@@ -3199,7 +3199,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
 // constraint
 // for
 // stability
-if (dynamicMaxRam > staticMax) { // XS contraint that dynamic max <=
+if (dynamicMaxRam > staticMax) { // XS constraint that dynamic max <=
 // static max
 s_logger.warn("dynamic max " + toHumanReadableSize(dynamicMaxRam) + " can't be greater than static max " + toHumanReadableSize(staticMax) + ", this can lead to stability issues. Setting static max as much as dynamic max ");
 return dynamicMaxRam;
@@ -3213,7 +3213,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
 return dynamicMinRam;
 }

-if (dynamicMinRam < recommendedValue) { // XS contraint that dynamic min
+if (dynamicMinRam < recommendedValue) { // XS constraint that dynamic min
 // > static min
 s_logger.warn("Vm ram is set to dynamic min " + toHumanReadableSize(dynamicMinRam) + " and is less than the recommended static min " + toHumanReadableSize(recommendedValue) + ", this could lead to stability issues");
 }
@@ -4589,7 +4589,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
 removeSR(conn, sr);
 return;
 } catch (XenAPIException | XmlRpcException e) {
-s_logger.warn(logX(sr, "Unable to get current opertions " + e.toString()), e);
+s_logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e);
 }
 String msg = "Remove SR failed";
 s_logger.warn(msg);
@@ -4684,9 +4684,9 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
 removeSR(conn, sr);
 return null;
 } catch (final XenAPIException e) {
-s_logger.warn(logX(sr, "Unable to get current opertions " + e.toString()), e);
+s_logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e);
 } catch (final XmlRpcException e) {
-s_logger.warn(logX(sr, "Unable to get current opertions " + e.getMessage()), e);
+s_logger.warn(logX(sr, "Unable to get current operations " + e.getMessage()), e);
 }
 final String msg = "Remove SR failed";
 s_logger.warn(msg);
@@ -633,7 +633,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
 try {
 final Set<VDI> vdis = VDI.getByNameLabel(conn, nameLabel);
 if (vdis.size() != 1) {
-s_logger.warn("destoryVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel);
+s_logger.warn("destroyVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel);
 return;
 }
 for (final VDI vdi : vdis) {
@@ -244,7 +244,7 @@ public class ElastistorUtil {

 if (listAccountResponse.getAccounts().getCount() != 0) {
 int i;
-// check weather a account in elasticenter with given Domain name is
+// check whether an account in elasticenter with given Domain name is
 // already present in the list of accounts
 for (i = 0; i < listAccountResponse.getAccounts().getCount(); i++) {
 if (domainName.equals(listAccountResponse.getAccounts().getAccount(i).getName())) {
@@ -943,7 +943,7 @@ public class DateraUtil {
 }

 /**
-* Checks wether a host initiator is present in an initiator group
+* Checks whether a host initiator is present in an initiator group
 *
 * @param initiator Host initiator to check
 * @param initiatorGroup the initiator group
@@ -255,7 +255,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
 }
 }
 } catch (Exception ex) {
-s_logger.debug("Unable to destoy volume" + data.getId(), ex);
+s_logger.debug("Unable to destroy volume" + data.getId(), ex);
 result.setResult(ex.toString());
 }
 callback.complete(result);
@@ -105,7 +105,7 @@ def manageAvailability(path, value):
 return


-def checkVolumeAvailablility(path):
+def checkVolumeAvailability(path):
 try:
 if not isVolumeAvailable(path):
 # The VHD file is not available on XenSever. The volume is probably
@@ -172,7 +172,7 @@ def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):

 baseCopyUuid = ''
 if isISCSI:
-checkVolumeAvailablility(snapshotPath)
+checkVolumeAvailability(snapshotPath)
 baseCopyUuid = scanParent(snapshotPath)
 else:
 baseCopyUuid = getParent(snapshotPath, isISCSI)
@@ -769,7 +769,7 @@ def add_to_ipset(ipsetname, ips, action):
 logging.debug("vm ip " + ip)
 util.pread2(['ipset', action, ipsetname, ip])
 except:
-logging.debug("vm ip alreday in ip set" + ip)
+logging.debug("vm ip already in ip set" + ip)
 continue

 return result
@@ -1023,7 +1023,7 @@ def network_rules_for_rebooted_vm(session, vmName):
 [vm_ip, vm_mac] = get_vm_mac_ip_from_log(vmchain)
 default_arp_antispoof(vmchain, vifs, vm_ip, vm_mac)

-#check wether the vm has secondary ips
+#check whether the vm has secondary ips
 if is_secondary_ips_set(vm_name) == True:
 vmips = get_vm_sec_ips(vm_name)
 #add arp rules for the secondaryp ip
@@ -190,7 +190,7 @@ def isfile(path, isISCSI):
 errMsg = ''
 exists = True
 if isISCSI:
-exists = checkVolumeAvailablility(path)
+exists = checkVolumeAvailability(path)
 else:
 exists = os.path.isfile(path)

@@ -269,7 +269,7 @@ def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):

 baseCopyUuid = ''
 if isISCSI:
-checkVolumeAvailablility(snapshotPath)
+checkVolumeAvailability(snapshotPath)
 baseCopyUuid = scanParent(snapshotPath)
 else:
 baseCopyUuid = getParent(snapshotPath, isISCSI)
@@ -439,7 +439,7 @@ def manageAvailability(path, value):
 return


-def checkVolumeAvailablility(path):
+def checkVolumeAvailability(path):
 try:
 if not isVolumeAvailable(path):
 # The VHD file is not available on XenSever. The volume is probably
@@ -1729,7 +1729,7 @@ public enum Config {
 String.class,
 "baremetal.ipmi.fail.retry",
 "5",
-"ipmi interface will be temporary out of order after power opertions(e.g. cycle, on), it leads following commands fail immediately. The value specifies retry times before accounting it as real failure",
+"ipmi interface will be temporary out of order after power operations(e.g. cycle, on), it leads following commands fail immediately. The value specifies retry times before accounting it as real failure",
 null),

 ApiLimitEnabled("Advanced", ManagementServer.class, Boolean.class, "api.throttling.enabled", "false", "Enable/disable Api rate limit", null),
@@ -5118,7 +5118,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
 List<SecondaryStorageVmVO> ssvms = _stnwMgr.getSSVMWithNoStorageNetwork(network.getDataCenterId());
 if (!ssvms.isEmpty()) {
 StringBuilder sb = new StringBuilder("Cannot add " + trafficType
-+ " traffic type as there are below secondary storage vm still running. Please stop them all and add Storage traffic type again, then destory them all to allow CloudStack recreate them with storage network(If you have added storage network ip range)");
++ " traffic type as there are below secondary storage vm still running. Please stop them all and add Storage traffic type again, then destroy them all to allow CloudStack recreate them with storage network(If you have added storage network ip range)");
 sb.append("SSVMs:");
 for (SecondaryStorageVmVO ssvm : ssvms) {
 sb.append(ssvm.getInstanceName()).append(":").append(ssvm.getState());
@@ -2533,7 +2533,7 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
 boolean revoke = false;
 if (ip.getState() == IpAddress.State.Releasing ) {
 // for ips got struck in releasing state we need to delete the rule not add.
-s_logger.debug("Rule revoke set to true for the ip " + ip.getAddress() +" becasue it is in releasing state");
+s_logger.debug("Rule revoke set to true for the ip " + ip.getAddress() +" because it is in releasing state");
 revoke = true;
 }
 final StaticNatImpl staticNat = new StaticNatImpl(ip.getAccountId(), ip.getDomainId(), guestNetworkId, ip.getId(), ip.getVmIp(), revoke);
@@ -670,7 +670,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
 " as it's already assigned to antoher vm");
 }

-//check wether the vm ip is alreday associated with any public ip address
+//check whether the vm ip is already associated with any public ip address
 IPAddressVO oldIP = _ipAddressDao.findByAssociatedVmIdAndVmIp(vmId, vmIp);

 if (oldIP != null) {
@@ -1170,7 +1170,7 @@ public class NetworkACLServiceImplTest {
 }

 @Test(expected = InvalidParameterValueException.class)
-public void moveRuleBetweenAclRulesTestThereIsSpaceBetweenPreviousRuleAndNextRuleToAccomodateTheNewRuleWithOtherruleColliding() {
+public void moveRuleBetweenAclRulesTestThereIsSpaceBetweenPreviousRuleAndNextRuleToAccommodateTheNewRuleWithOtherRuleColliding() {
 Mockito.when(previousAclRuleMock.getNumber()).thenReturn(10);
 Mockito.when(nextAclRuleMock.getNumber()).thenReturn(15);

@@ -1186,7 +1186,7 @@ public class NetworkACLServiceImplTest {
 }

 @Test
-public void moveRuleBetweenAclRulesTestThereIsSpaceBetweenPreviousRuleAndNextRuleToAccomodateTheNewRule() {
+public void moveRuleBetweenAclRulesTestThereIsSpaceBetweenPreviousRuleAndNextRuleToAccommodateTheNewRule() {
 Mockito.when(previousAclRuleMock.getNumber()).thenReturn(10);
 Mockito.when(nextAclRuleMock.getNumber()).thenReturn(11);
 Mockito.when(aclRuleBeingMovedMock.getId()).thenReturn(1l);
@@ -1218,7 +1218,7 @@ public class NetworkACLServiceImplTest {
 }

 @Test
-public void moveRuleBetweenAclRulesTestThereIsNoSpaceBetweenPreviousRuleAndNextRuleToAccomodateTheNewRule() {
+public void moveRuleBetweenAclRulesTestThereIsNoSpaceBetweenPreviousRuleAndNextRuleToAccommodateTheNewRule() {
 Mockito.when(previousAclRuleMock.getNumber()).thenReturn(10);
 Mockito.when(nextAclRuleMock.getNumber()).thenReturn(15);
 Mockito.when(aclRuleBeingMovedMock.getNumber()).thenReturn(50);
@@ -1927,7 +1927,7 @@ CREATE TABLE `cloud`.`keystore` (
 `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
 `name` varchar(64) NOT NULL COMMENT 'unique name for the certifiation',
 `certificate` text NOT NULL COMMENT 'the actual certificate being stored in the db',
-`key` text COMMENT 'private key associated wih the certificate',
+`key` text COMMENT 'private key associated with the certificate',
 `domain_suffix` varchar(256) NOT NULL COMMENT 'DNS domain suffix associated with the certificate',
 `seq` int,
 PRIMARY KEY (`id`),
@@ -147,7 +147,7 @@ INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (88,
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (89, UUID(), 6, 'Windows Server 2003 Standard Edition(32-bit)');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (90, UUID(), 6, 'Windows Server 2003 Standard Edition(64-bit)');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (91, UUID(), 6, 'Windows Server 2003 Web Edition');
-INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (92, UUID(), 6, 'Microsoft Small Bussiness Server 2003');
+INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (92, UUID(), 6, 'Microsoft Small Business Server 2003');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (93, UUID(), 6, 'Windows XP (32-bit)');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (94, UUID(), 6, 'Windows XP (64-bit)');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (95, UUID(), 6, 'Windows 2000 Advanced Server');
@@ -395,7 +395,7 @@ INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Standard Edition (32-bit)', 89);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Standard Edition (64-bit)', 90);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Web Edition', 91);
-INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Small Bussiness Server 2003', 92);
+INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Small Business Server 2003', 92);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Vista (32-bit)', 56);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Vista (64-bit)', 101);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows XP Professional (32-bit)', 93);
@@ -2083,9 +2083,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):

 vm4details = self.deploy_vm()

-newvolumetodestoy_VM = self.browse_upload_volume()
+newvolumetodestroy_VM = self.browse_upload_volume()

-self.attach_volume(vm4details, newvolumetodestoy_VM.id)
+self.attach_volume(vm4details, newvolumetodestroy_VM.id)

 self.destroy_vm(vm4details)

@@ -2095,7 +2095,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
 self.expunge_vm(vm4details)

 cmd = deleteVolume.deleteVolumeCmd()
-cmd.id = newvolumetodestoy_VM.id
+cmd.id = newvolumetodestroy_VM.id
 self.apiclient.deleteVolume(cmd)

 self.debug(
@@ -627,7 +627,7 @@ class TestVMLifeCycleVPC(cloudstackTestCase):
 """

 # Validate the following
-# 1. Destory the virtual machines.
+# 1. Destroy the virtual machines.
 # 2. Rules should be still configured on virtual router.
 # 3. Recover the virtual machines.
 # 4. Vm should be in stopped state. State both the instances
@@ -1751,7 +1751,7 @@ class TestVMLifeCycleStoppedVPCVR(cloudstackTestCase):
 """

 # Validate the following
-# 1. Destory the virtual machines.
+# 1. Destroy the virtual machines.
 # 2. Rules should be still configured on virtual router.
 # 3. Recover the virtual machines.
 # 4. Vm should be in stopped state. State both the instances
@@ -2466,7 +2466,7 @@ class TestVMLifeCycleDiffHosts(cloudstackTestCase):
 """

 # Validate the following
-# 1. Destory the virtual machines.
+# 1. Destroy the virtual machines.
 # 2. Rules should be still configured on virtual router.
 # 3. Recover the virtual machines.
 # 4. Vm should be in stopped state. State both the instances
@@ -11,7 +11,7 @@
 <dataset>
 <configuration name="usage.stats.job.aggregation.range" value="600" instance="test"/>

-<vm_instance type="User" id="8" account_id="1" domain_id="1" name="test" instance_name="test" state="destoyed" guest_os_id="1" service_offering_id="1" data_center_id="1" vnc_password="xyz" vm_type="User" created="2019-01-01 00:00:01" removed="2018-01-01 00:00:01" />
+<vm_instance type="User" id="8" account_id="1" domain_id="1" name="test" instance_name="test" state="destroyed" guest_os_id="1" service_offering_id="1" data_center_id="1" vnc_password="xyz" vm_type="User" created="2019-01-01 00:00:01" removed="2018-01-01 00:00:01" />

 <volumes id="16" account_id="1" domain_id="1" size="1" data_center_id="1" volume_type="root" disk_offering_id="1" removed="2018-01-01 00:00:01"/>
 <volumes id="17" account_id="1" domain_id="1" size="1" data_center_id="1" volume_type="root" disk_offering_id="1" removed="2019-01-01 00:00:01"/>
@@ -11,7 +11,7 @@
 <dataset>
 <configuration name="usage.stats.job.aggregation.range" value="600" instance="test" />

-<vm_instance type="User" id="8" account_id="1" domain_id="1" name="test" instance_name="test" state="destoyed" guest_os_id="1" service_offering_id="1" data_center_id="1" vnc_password="xyz" vm_type="User" created="2019-01-01 00:00:01" removed="2018-01-01 00:00:01" />
+<vm_instance type="User" id="8" account_id="1" domain_id="1" name="test" instance_name="test" state="destroyed" guest_os_id="1" service_offering_id="1" data_center_id="1" vnc_password="xyz" vm_type="User" created="2019-01-01 00:00:01" removed="2018-01-01 00:00:01" />

 <volumes id="16" account_id="1" domain_id="1" size="1" data_center_id="1" volume_type="root" disk_offering_id="1" removed="2018-01-01 00:00:01"/>
 <volumes id="17" account_id="1" domain_id="1" size="1" data_center_id="1" volume_type="root" disk_offering_id="1" removed="2018-01-01 00:00:01"/>
@@ -33,8 +33,8 @@ public class Ip4AddressTest {

 @Test
 public void testIsSameAddressAs() {
-Assert.assertTrue("1 and one should be considdered the same address", new Ip4Address(1L, 5L).isSameAddressAs("0.0.0.1"));
-Assert.assertFalse("zero and 0L should be considdered the same address but a Long won't be accepted", new Ip4Address("0.0.0.0", "00:00:00:00:00:08").isSameAddressAs(0L));
+Assert.assertTrue("1 and one should be considered the same address", new Ip4Address(1L, 5L).isSameAddressAs("0.0.0.1"));
+Assert.assertFalse("zero and 0L should be considered the same address but a Long won't be accepted", new Ip4Address("0.0.0.0", "00:00:00:00:00:08").isSameAddressAs(0L));
 }

 }
@@ -56,8 +56,8 @@ public class IpTest {

 @Test
 public void testIsSameAddressAs() {
-Assert.assertTrue("1 and one should be considdered the same address", new Ip(1L).isSameAddressAs("0.0.0.1"));
-Assert.assertTrue("zero and 0L should be considdered the same address", new Ip("0.0.0.0").isSameAddressAs(0L));
+Assert.assertTrue("1 and one should be considered the same address", new Ip(1L).isSameAddressAs("0.0.0.1"));
+Assert.assertTrue("zero and 0L should be considered the same address", new Ip("0.0.0.0").isSameAddressAs(0L));
 }

 }