Merge branch 'master' of ssh://git.cloud.com/var/lib/git/cloudstack-oss

This commit is contained in:
edison 2010-09-01 15:40:46 -07:00
commit 20731c90f9
12 changed files with 78 additions and 13 deletions

View File

@ -743,6 +743,28 @@ See the files in the {{{debian/}}} folder.</pre>
<div title="CloudStack run-time dependencies" creator="RuddO" modifier="RuddO" created="201008081310" tags="fixme" changecount="1">
<pre>Not done yet!</pre>
</div>
<div title="Database migration infrastructure" creator="RuddO" modifier="RuddO" created="201009011837" modified="201009011852" changecount="14">
<pre>To support incremental migration from one version to another without having to redeploy the database, the CloudStack provides a schema migration mechanism for the database.
!!!How does it work?
When the database is deployed for the first time with [[waf deploydb]] or the command {{{cloud-setup-databases}}}, a row named {{{schema.level}}}, containing the current schema level, is written to the {{{configuration}}} table. This schema level row comes from the file {{{setup/db/schema-level.sql}}} in the source (refer to the [[Installation paths]] topic to find out where this file is installed in a running system).
This value is used by the database migrator {{{cloud-migrate-databases}}} (source {{{setup/bindir/cloud-migrate-databases.in}}}) to determine the starting schema level. The database migrator has a series of classes -- each class represents a step in the migration process and is usually tied to the execution of a SQL file stored in {{{setup/db}}}. To migrate the database, the database migrator:
# walks the list of steps it knows about,
# generates a list of steps sorted by the order they should be executed in,
# executes each step in order, and
# at the end of each step, records the new schema level to the database table {{{configuration}}}
For more information, refer to the database migrator source -- it is documented.
!!!What impact does this have on me as a developer?
Whenever you need to evolve the schema of the database:
# write a migration SQL script and store it in {{{setup/db}}},
# include your schema changes in the appropriate SQL file {{{create-*.sql}}} too (as the database is expected to be at its latest evolved schema level right after deploying a fresh database)
# write a class in {{{setup/bindir/cloud-migrate-databases.in}}}, describing the migration step; in detail:
## the schema level your migration step expects the database to be in,
## the schema level your migration step will leave your database in (presumably the latest schema level, which you will have to choose!),
## and the name / description of the step
# bump the schema level in {{{setup/db/schema-level.sql}}} to the latest schema level
Otherwise, ''end-user migration will fail catastrophically''.</pre>
</div>
<div title="DefaultTiddlers" creator="RuddO" modifier="RuddO" created="201008072205" modified="201008072257" changecount="4">
<pre>[[Welcome]]</pre>
</div>
@ -752,7 +774,7 @@ See the files in the {{{debian/}}} folder.</pre>
<div title="Git" creator="RuddO" modifier="RuddO" created="201008081330" tags="fixme" changecount="1">
<pre>Not done yet!</pre>
</div>
<div title="Hacking on the CloudStack" creator="RuddO" modifier="RuddO" created="201008072228" modified="201008081354" changecount="47">
<div title="Hacking on the CloudStack" creator="RuddO" modifier="RuddO" created="201008072228" modified="201009011847" changecount="51">
<pre>Start here if you want to learn the essentials to extend, modify and enhance the CloudStack. This assumes that you've already familiarized yourself with CloudStack concepts, installation and configuration using the [[Getting started|Welcome]] instructions.
* [[Obtain the source|Obtaining the source]]
* [[Prepare your environment|Preparing your development environment]]
@ -764,6 +786,7 @@ Extra developer information:
* [[How to integrate with Eclipse]]
* [[Starting over]]
* [[Making a source release|waf dist]]
* [[How to write database migration scripts|Database migration infrastructure]]
</pre>
</div>
<div title="How to integrate with Eclipse" creator="RuddO" modifier="RuddO" created="201008081029" modified="201008081346" changecount="3">

View File

@ -582,6 +582,9 @@ fi
%{_datadir}/%{name}/setup/index-212to213.sql
%{_datadir}/%{name}/setup/postprocess-20to21.sql
%{_datadir}/%{name}/setup/schema-20to21.sql
%{_datadir}/%{name}/setup/schema-level.sql
%{_datadir}/%{name}/setup/schema-21to22.sql
%{_datadir}/%{name}/setup/data-21to22.sql
%doc README
%doc INSTALL
%doc HACKING

View File

@ -12,3 +12,6 @@
/usr/share/cloud/setup/index-212to213.sql
/usr/share/cloud/setup/postprocess-20to21.sql
/usr/share/cloud/setup/schema-20to21.sql
/usr/share/cloud/setup/schema-level.sql
/usr/share/cloud/setup/schema-21to22.sql
/usr/share/cloud/setup/data-21to22.sql

View File

@ -101,15 +101,15 @@ import com.cloud.async.executor.SecurityGroupParam;
import com.cloud.async.executor.UpdateLoadBalancerParam;
import com.cloud.async.executor.UpgradeVMParam;
import com.cloud.async.executor.VMOperationParam;
import com.cloud.async.executor.VMOperationParam.VmOp;
import com.cloud.async.executor.VolumeOperationParam;
import com.cloud.async.executor.VMOperationParam.VmOp;
import com.cloud.async.executor.VolumeOperationParam.VolumeOp;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.configuration.ConfigurationVO;
import com.cloud.configuration.ResourceCount.ResourceType;
import com.cloud.configuration.ResourceLimitVO;
import com.cloud.configuration.ResourceCount.ResourceType;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.configuration.dao.ResourceLimitDao;
import com.cloud.consoleproxy.ConsoleProxyManager;
@ -119,8 +119,8 @@ import com.cloud.dc.DataCenterIpAddressVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.PodVlanMapVO;
import com.cloud.dc.Vlan.VlanType;
import com.cloud.dc.VlanVO;
import com.cloud.dc.Vlan.VlanType;
import com.cloud.dc.dao.AccountVlanMapDao;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
@ -175,7 +175,6 @@ import com.cloud.network.security.NetworkGroupRulesVO;
import com.cloud.network.security.NetworkGroupVO;
import com.cloud.network.security.dao.NetworkGroupDao;
import com.cloud.offering.NetworkOffering;
import com.cloud.offering.NetworkOffering.GuestIpType;
import com.cloud.offering.ServiceOffering;
import com.cloud.serializer.GsonHelper;
import com.cloud.server.auth.UserAuthenticator;
@ -188,13 +187,10 @@ import com.cloud.storage.GuestOSCategoryVO;
import com.cloud.storage.GuestOSVO;
import com.cloud.storage.LaunchPermissionVO;
import com.cloud.storage.Snapshot;
import com.cloud.storage.Snapshot.SnapshotType;
import com.cloud.storage.SnapshotPolicyVO;
import com.cloud.storage.SnapshotScheduleVO;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.FileSystem;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolVO;
@ -202,9 +198,12 @@ import com.cloud.storage.StorageStats;
import com.cloud.storage.VMTemplateHostVO;
import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume.VolumeType;
import com.cloud.storage.VolumeStats;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.Snapshot.SnapshotType;
import com.cloud.storage.Storage.FileSystem;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Volume.VolumeType;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.DiskTemplateDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
@ -215,9 +214,9 @@ import com.cloud.storage.dao.SnapshotPolicyDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateDao.TemplateFilter;
import com.cloud.storage.dao.VMTemplateHostDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VMTemplateDao.TemplateFilter;
import com.cloud.storage.preallocatedlun.PreallocatedLunVO;
import com.cloud.storage.preallocatedlun.dao.PreallocatedLunDao;
import com.cloud.storage.secondary.SecondaryStorageVmManager;
@ -239,12 +238,12 @@ import com.cloud.user.dao.UserDao;
import com.cloud.user.dao.UserStatisticsDao;
import com.cloud.uservm.UserVm;
import com.cloud.utils.DateUtil;
import com.cloud.utils.DateUtil.IntervalType;
import com.cloud.utils.EnumUtils;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.PasswordGenerator;
import com.cloud.utils.StringUtils;
import com.cloud.utils.DateUtil.IntervalType;
import com.cloud.utils.component.Adapters;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.concurrency.NamedThreadFactory;
@ -921,6 +920,9 @@ public class ManagementServerImpl implements ManagementServer {
// Mark the account's volumes as destroyed
List<VolumeVO> volumes = _volumeDao.findDetachedByAccount(accountId);
for (VolumeVO volume : volumes) {
if(volume.getPoolId()==null){
accountCleanupNeeded = true;
}
_storageMgr.destroyVolume(volume);
}

View File

@ -1006,6 +1006,9 @@ public class SnapshotManagerImpl implements SnapshotManager {
// i.e Call them before the VMs for those volumes are destroyed.
boolean success = true;
for (VolumeVO volume : volumes) {
if(volume.getPoolId()==null){
continue;
}
Long volumeId = volume.getId();
Long dcId = volume.getDataCenterId();
String secondaryStoragePoolURL = _storageMgr.getSecondaryStorageURL(dcId);

View File

@ -150,6 +150,23 @@ class From21datamigratedTo21postprocessed(cloud_utils.MigrationStep):
to_level = "2.1"
def run(self): self.context.run_sql_resource("postprocess-20to21.sql")
class From21To213(cloud_utils.MigrationStep):
def __str__(self): return "Dropping obsolete indexes"
from_level = "2.1"
to_level = "2.1.3"
def run(self): self.context.run_sql_resource("index-212to213.sql")
class From213To22data(cloud_utils.MigrationStep):
def __str__(self): return "Migrating data"
from_level = "2.1.3"
to_level = "2.2-01"
def run(self): self.context.run_sql_resource("data-21to22.sql")
class From22dataTo22(cloud_utils.MigrationStep):
def __str__(self): return "Migrating indexes"
from_level = "2.2-01"
to_level = "2.2"
def run(self): self.context.run_sql_resource("index-21to22.sql")
# command line harness functions

View File

@ -352,3 +352,10 @@ if rootuser:
print "Applying file %s to the database on server %s:%s"%(p,host,port)
try: run_mysql(text,rootuser,rootpassword,host,port)
except CalledProcessError: sys.exit(22)
p = os.path.join(dbfilepath,"schema-level.sql")
if os.path.isfile(p):
text = file(p).read()
print "Applying file %s to the database on server %s:%s"%(p,host,port)
try: run_mysql(text,rootuser,rootpassword,host,port)
except CalledProcessError: sys.exit(22)

View File

@ -0,0 +1 @@
INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) VALUES ('Hidden', 'DEFAULT', 'database', 'schema.level', '2.2', 'The schema level of this database');

View File

@ -290,7 +290,7 @@ $(document).ready(function() {
submenuContent.find("#grid_content").prepend(template.fadeIn("slow"));
var username = thisDialog.find("#add_user_username").val();
var password = $.md5(thisDialog.find("#add_user_password").val());
var password = $.md5(encodeURIComponent(thisDialog.find("#add_user_password").val()));
var email = thisDialog.find("#add_user_email").val();
if(email == "")
email = username;
@ -318,7 +318,7 @@ $(document).ready(function() {
$.ajax({
type: "POST",
data: createURL("command=createUser&username="+encodeURIComponent(username)+"&password="+encodeURIComponent(password)+"&email="+encodeURIComponent(email)+"&firstname="+encodeURIComponent(firstname)+"&lastname="+encodeURIComponent(lastname)+"&account="+account+"&accounttype="+accountType+"&domainid="+domainId+moreCriteria.join("")+"&response=json"),
data: createURL("command=createUser&username="+encodeURIComponent(username)+"&password="+password+"&email="+encodeURIComponent(email)+"&firstname="+encodeURIComponent(firstname)+"&lastname="+encodeURIComponent(lastname)+"&account="+account+"&accounttype="+accountType+"&domainid="+domainId+moreCriteria.join("")+"&response=json"),
dataType: "json",
async: false,
success: function(json) {

View File

@ -777,6 +777,12 @@ def deploydb(ctx,virttech=None):
after = after + file(p).read()
Utils.pprint("GREEN","Reading database code from %s"%p)
p = _join("setup","db","schema-level.sql")
if _exists(p):
p = dev_override(p)
after = after + file(p).read()
Utils.pprint("GREEN","Reading database code from %s"%p)
cmd = ["mysql","--user=%s"%dbuser,"-h",dbhost,"--password=%s"%dbpw]
Utils.pprint("GREEN","Deploying post-configuration database scripts to %s (user %s)"%(dbhost,dbuser))
Utils.pprint("BLUE"," ".join(cmd))