Add upgrade path from 4.3 to 4.4

Wido den Hollander 2013-11-21 15:48:01 +01:00
parent dc037b8d2f
commit 036fffcd7c
4 changed files with 308 additions and 0 deletions

View File

@@ -65,6 +65,7 @@ import com.cloud.upgrade.dao.Upgrade40to41;
import com.cloud.upgrade.dao.Upgrade410to420;
import com.cloud.upgrade.dao.Upgrade420to421;
import com.cloud.upgrade.dao.Upgrade421to430;
import com.cloud.upgrade.dao.Upgrade430to440;
import com.cloud.upgrade.dao.UpgradeSnapshot217to224;
import com.cloud.upgrade.dao.UpgradeSnapshot223to224;
import com.cloud.upgrade.dao.VersionDao;
@@ -193,6 +194,8 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
_upgradeMap.put("4.2.1", new DbUpgrade[] {new Upgrade421to430()});
_upgradeMap.put("4.3.0", new DbUpgrade[] {new Upgrade430to440()});
//CP Upgrades
_upgradeMap.put("3.0.3", new DbUpgrade[] {new Upgrade303to304(), new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(),
        new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()});
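
With this entry in place, a database that reports version 4.3.0 resolves to an upgrade chain containing the new Upgrade430to440 step. As a rough illustration (not the actual DatabaseUpgradeChecker code), a map-driven checker of this shape applies each step's prepare scripts, Java-side migration, and cleanup scripts in order; the runScript helper below is a hypothetical stand-in for whatever executes a SQL file against the connection:

    // Illustrative sketch only: walk the resolved chain and apply each step in order.
    static String applyChain(Map<String, DbUpgrade[]> upgradeMap, Connection conn, String currentVersion) {
        for (DbUpgrade upgrade : upgradeMap.get(currentVersion)) {   // e.g. "4.3.0" -> {new Upgrade430to440()}
            for (File script : upgrade.getPrepareScripts()) {
                runScript(conn, script);                             // hypothetical helper that executes the SQL file
            }
            upgrade.performDataMigration(conn);                      // empty for the 4.3.0 -> 4.4.0 step
            for (File script : upgrade.getCleanupScripts()) {
                runScript(conn, script);
            }
            currentVersion = upgrade.getUpgradedVersion();           // "4.4.0" once Upgrade430to440 completes
        }
        return currentVersion;
    }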

View File

@@ -0,0 +1,69 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;

import java.io.File;
import java.sql.Connection;

import org.apache.log4j.Logger;

import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

public class Upgrade430to440 implements DbUpgrade {
    final static Logger s_logger = Logger.getLogger(Upgrade430to440.class);

    @Override
    public String[] getUpgradableVersionRange() {
        return new String[] {"4.3.0", "4.4.0"};
    }

    @Override
    public String getUpgradedVersion() {
        return "4.4.0";
    }

    @Override
    public boolean supportsRollingUpgrade() {
        return false;
    }

    @Override
    public File[] getPrepareScripts() {
        String script = Script.findScript("", "db/schema-430to440.sql");
        if (script == null) {
            throw new CloudRuntimeException("Unable to find db/schema-430to440.sql");
        }
        return new File[] {new File(script)};
    }

    @Override
    public void performDataMigration(Connection conn) {
        // No Java-side data migration is required for this upgrade; all changes are in the SQL scripts.
    }

    @Override
    public File[] getCleanupScripts() {
        String script = Script.findScript("", "db/schema-430to440-cleanup.sql");
        if (script == null) {
            throw new CloudRuntimeException("Unable to find db/schema-430to440-cleanup.sql");
        }
        return new File[] {new File(script)};
    }
}
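
The class itself only wires two SQL files into the upgrade framework; performDataMigration is left empty because every 4.3.0 to 4.4.0 change is expressed in schema-430to440.sql. Judging purely from the @Override methods above, the DbUpgrade contract it fulfils looks roughly like the sketch below; this is a reconstruction from usage, not the verbatim interface source:

    public interface DbUpgrade {
        String[] getUpgradableVersionRange();        // lowest version this step applies to, and the target version
        String getUpgradedVersion();                 // version recorded once the step completes
        boolean supportsRollingUpgrade();            // whether management servers of mixed versions may coexist
        File[] getPrepareScripts();                  // SQL run before performDataMigration
        void performDataMigration(Connection conn);  // Java-side migration work, if any
        File[] getCleanupScripts();                  // SQL run after the migration succeeds
    }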

View File

@@ -0,0 +1,22 @@
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements. See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership. The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied. See the License for the
-- specific language governing permissions and limitations
-- under the License.
--;
-- Schema cleanup from 4.3.0 to 4.4.0;
--;

View File

@@ -0,0 +1,214 @@
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements. See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership. The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied. See the License for the
-- specific language governing permissions and limitations
-- under the License.
--;
-- Schema upgrade from 4.3.0 to 4.4.0;
--;
-- Disable foreign key checking
SET foreign_key_checks = 0;
ALTER TABLE `cloud`.`disk_offering` ADD `cache_mode` VARCHAR( 16 ) NOT NULL DEFAULT 'none' COMMENT 'The disk cache mode to use for disks created with this offering';
DROP VIEW IF EXISTS `cloud`.`disk_offering_view`;
CREATE VIEW `cloud`.`disk_offering_view` AS
select
disk_offering.id,
disk_offering.uuid,
disk_offering.name,
disk_offering.display_text,
disk_offering.disk_size,
disk_offering.min_iops,
disk_offering.max_iops,
disk_offering.created,
disk_offering.tags,
disk_offering.customized,
disk_offering.customized_iops,
disk_offering.removed,
disk_offering.use_local_storage,
disk_offering.system_use,
disk_offering.bytes_read_rate,
disk_offering.bytes_write_rate,
disk_offering.iops_read_rate,
disk_offering.iops_write_rate,
disk_offering.cache_mode,
disk_offering.sort_key,
disk_offering.type,
disk_offering.display_offering,
domain.id domain_id,
domain.uuid domain_uuid,
domain.name domain_name,
domain.path domain_path
from
`cloud`.`disk_offering`
left join
`cloud`.`domain` ON disk_offering.domain_id = domain.id
where
disk_offering.state='ACTIVE';
DROP VIEW IF EXISTS `cloud`.`service_offering_view`;
CREATE VIEW `cloud`.`service_offering_view` AS
select
service_offering.id,
disk_offering.uuid,
disk_offering.name,
disk_offering.display_text,
disk_offering.created,
disk_offering.tags,
disk_offering.removed,
disk_offering.use_local_storage,
disk_offering.system_use,
disk_offering.bytes_read_rate,
disk_offering.bytes_write_rate,
disk_offering.iops_read_rate,
disk_offering.iops_write_rate,
disk_offering.cache_mode,
service_offering.cpu,
service_offering.speed,
service_offering.ram_size,
service_offering.nw_rate,
service_offering.mc_rate,
service_offering.ha_enabled,
service_offering.limit_cpu_use,
service_offering.host_tag,
service_offering.default_use,
service_offering.vm_type,
service_offering.sort_key,
service_offering.is_volatile,
service_offering.deployment_planner,
domain.id domain_id,
domain.uuid domain_uuid,
domain.name domain_name,
domain.path domain_path
from
`cloud`.`service_offering`
inner join
`cloud`.`disk_offering` ON service_offering.id = disk_offering.id
left join
`cloud`.`domain` ON disk_offering.domain_id = domain.id
where
disk_offering.state='Active';
DROP VIEW IF EXISTS `cloud`.`volume_view`;
CREATE VIEW `cloud`.`volume_view` AS
select
volumes.id,
volumes.uuid,
volumes.name,
volumes.device_id,
volumes.volume_type,
volumes.size,
volumes.min_iops,
volumes.max_iops,
volumes.created,
volumes.state,
volumes.attached,
volumes.removed,
volumes.pod_id,
volumes.display_volume,
volumes.format,
volumes.path,
account.id account_id,
account.uuid account_uuid,
account.account_name account_name,
account.type account_type,
domain.id domain_id,
domain.uuid domain_uuid,
domain.name domain_name,
domain.path domain_path,
projects.id project_id,
projects.uuid project_uuid,
projects.name project_name,
data_center.id data_center_id,
data_center.uuid data_center_uuid,
data_center.name data_center_name,
data_center.networktype data_center_type,
vm_instance.id vm_id,
vm_instance.uuid vm_uuid,
vm_instance.name vm_name,
vm_instance.state vm_state,
vm_instance.vm_type,
user_vm.display_name vm_display_name,
volume_store_ref.size volume_store_size,
volume_store_ref.download_pct,
volume_store_ref.download_state,
volume_store_ref.error_str,
volume_store_ref.created created_on_store,
disk_offering.id disk_offering_id,
disk_offering.uuid disk_offering_uuid,
disk_offering.name disk_offering_name,
disk_offering.display_text disk_offering_display_text,
disk_offering.use_local_storage,
disk_offering.system_use,
disk_offering.bytes_read_rate,
disk_offering.bytes_write_rate,
disk_offering.iops_read_rate,
disk_offering.iops_write_rate,
disk_offering.cache_mode,
storage_pool.id pool_id,
storage_pool.uuid pool_uuid,
storage_pool.name pool_name,
cluster.hypervisor_type,
vm_template.id template_id,
vm_template.uuid template_uuid,
vm_template.extractable,
vm_template.type template_type,
resource_tags.id tag_id,
resource_tags.uuid tag_uuid,
resource_tags.key tag_key,
resource_tags.value tag_value,
resource_tags.domain_id tag_domain_id,
resource_tags.account_id tag_account_id,
resource_tags.resource_id tag_resource_id,
resource_tags.resource_uuid tag_resource_uuid,
resource_tags.resource_type tag_resource_type,
resource_tags.customer tag_customer,
async_job.id job_id,
async_job.uuid job_uuid,
async_job.job_status job_status,
async_job.account_id job_account_id
from
`cloud`.`volumes`
inner join
`cloud`.`account` ON volumes.account_id = account.id
inner join
`cloud`.`domain` ON volumes.domain_id = domain.id
left join
`cloud`.`projects` ON projects.project_account_id = account.id
left join
`cloud`.`data_center` ON volumes.data_center_id = data_center.id
left join
`cloud`.`vm_instance` ON volumes.instance_id = vm_instance.id
left join
`cloud`.`user_vm` ON user_vm.id = vm_instance.id
left join
`cloud`.`volume_store_ref` ON volumes.id = volume_store_ref.volume_id
left join
`cloud`.`disk_offering` ON volumes.disk_offering_id = disk_offering.id
left join
`cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id
left join
`cloud`.`cluster` ON storage_pool.cluster_id = cluster.id
left join
`cloud`.`vm_template` ON volumes.template_id = vm_template.id OR volumes.iso_id = vm_template.id
left join
`cloud`.`resource_tags` ON resource_tags.resource_id = volumes.id
and resource_tags.resource_type = 'Volume'
left join
`cloud`.`async_job` ON async_job.instance_id = volumes.id
and async_job.instance_type = 'Volume'
and async_job.job_status = 0;
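
After the prepare script runs, the new cache_mode column should be queryable on disk_offering and exposed through the rebuilt views. A minimal post-upgrade sanity check, assuming local MySQL access to the cloud database and a MySQL JDBC driver on the classpath (the URL, user, and password are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class CacheModeUpgradeCheck {
        public static void main(String[] args) throws SQLException {
            // Placeholder connection details; adjust to the actual deployment.
            try (Connection conn = DriverManager.getConnection(
                         "jdbc:mysql://localhost:3306/cloud", "cloud", "secret");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(
                         "SELECT cache_mode FROM `cloud`.`disk_offering` LIMIT 1")) {
                // If the upgrade script ran, the column exists and the query succeeds.
                System.out.println(rs.next()
                        ? "cache_mode present, first row reports: " + rs.getString(1)
                        : "cache_mode column exists; table has no rows yet");
            }
        }
    }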